| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
MultipleLinearRegression/untitled0.py
|
shubham-shinde/Machine-Learning
| 0
|
12782251
|
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import the data
dataset = pd.read_csv('50_Startups.csv')
#index location = iloc
#dataset is a 2d matrix
#select all rows; X takes every column except the last, y takes column 4 (the target)
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:,4].values
#data preprocessing
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
#to avoid the difficulty of mixing strings and numbers, convert the categorical strings to integers
#LabelEncoder converts the state names into integer codes (0, 1 & 2)
X[:,3] = labelencoder.fit_transform(X[:,3])
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()
#OneHotEncoder turns those integer codes into binary dummy columns (3 new columns); the next line drops one to avoid the dummy variable trap
X=X[:,1:]
#now split the data into training and testing sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 1/3, random_state=0)
#now import Linear Regression model from scikit learn
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
plt.scatter(X_train[:,-1], y_train, color="red")
plt.plot(X_train[:,-1], regressor.predict(X_train), color='blue')
plt.title('Profit vs Marketing Spend (Training set)')
plt.xlabel('Marketing Spend')
plt.ylabel('Profit')
plt.show()
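# Hedged modernization sketch (added; not part of the original script): on
# scikit-learn >= 0.24 the `categorical_features` argument used above is gone,
# and the usual replacement is a ColumnTransformer. Column index 3 and the CSV
# name simply mirror the code above; everything else is an assumption.
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
# one-hot encode the categorical column (index 3), keep the rest as-is,
# and drop the first dummy column to avoid the dummy variable trap
ct = ColumnTransformer([('state', OneHotEncoder(drop='first'), [3])],
                       remainder='passthrough')
X = ct.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state=0)
regressor = LinearRegression().fit(X_train, y_train)
y_pred = regressor.predict(X_test)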
| 3.640625
| 4
|
angalabiri/shop/models/cartmodels.py
|
dark-codr/ebiangala
| 1
|
12782252
|
<filename>angalabiri/shop/models/cartmodels.py<gh_stars>1-10
import random
import os
import math
import datetime
from decimal import Decimal
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.db import models
from category.models import Category, Tag
from ckeditor_uploader.fields import RichTextUploadingField
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator
from django.db.models import (
CASCADE,
SET_NULL,
BooleanField,
CharField,
DateField,
DateTimeField,
DecimalField,
EmailField,
FileField,
ForeignKey,
GenericIPAddressField,
ImageField,
IntegerField,
ManyToManyField,
OneToOneField,
Q,
SlugField,
URLField,
)
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel
# from angalabiri.shop.managers.cartmanagers import CartManager
from angalabiri.shop.models.productmodels import Product, ProductVariation
User = settings.AUTH_USER_MODEL
# Start Cart models
# class Cart(TimeStampedModel):
# user = ForeignKey(User, on_delete=SET_NULL, null=True, blank=True)
# products = ManyToManyField(Product, blank=True)
# subtotal = DecimalField(default=0.00, max_digits=100, decimal_places=2)
# total = DecimalField(default=0.00, max_digits=100, decimal_places=2)
# objects = CartManager()
# def __str__(self):
# return str(self.id)
# @property
# def is_digital(self):
# qs = self.products.all() #every product
# new_qs = qs.filter(is_digital=False) # every product that is not digital
# if new_qs.exists():
# return False
# return True
# End cart models
| 1.953125
| 2
|
src/model/models.py
|
tsoibet/task-management-site-2
| 0
|
12782253
|
<reponame>tsoibet/task-management-site-2
import enum
from datetime import datetime
from flask.json import dump
from database import db
from marshmallow import Schema, fields, post_load, validates_schema, ValidationError
from marshmallow.validate import Length, Range, OneOf
from flask_marshmallow import Marshmallow
from sqlalchemy.orm.attributes import flag_modified
class Status(enum.Enum):
TO_DO = 1
IN_PROGRESS = 2
DONE = 3
class Task(db.Model):
__tablename__ = 'task'
id = db.Column(db.Integer, primary_key=True, nullable=False)
title = db.Column(db.String(40), nullable=False)
detail = db.Column(db.Text, nullable=False)
status = db.Column(db.Enum(Status), nullable=False)
priority = db.Column(db.Integer, nullable=False)
deadlineness = db.Column(db.Boolean, nullable=False)
deadline = db.Column(db.DateTime, nullable=False)
created_at = db.Column(db.TIMESTAMP, server_default = db.func.now())
def __init__(self, title, detail, status, priority, deadlineness, deadline=None):
self.title = title
self.detail = detail
self.status = status
self.priority = priority
self.deadlineness = deadlineness
if deadlineness is True:
self.deadline = deadline
else:
self.deadline = datetime(9999,12,31,0,0,0)
def update_dict(self, data):
for name, value in data.items():
if name in self.__dict__:
setattr(self, name, value)
if data.get('deadlineness') is False:
self.deadline = datetime(9999,12,31,0,0,0)
ma = Marshmallow()
class TaskSchema(ma.SQLAlchemyAutoSchema):
status = fields.Method("get_status")
def get_status(self, obj):
return obj.status.name
class Meta:
model = Task
class CreateTaskInputSchema(Schema):
title = fields.Str(required=True, validate=Length(max=40))
detail = fields.Str(required=True, validate=Length(max=1024))
status = fields.Str(required=True, validate=OneOf(
[Status.TO_DO.name, Status.IN_PROGRESS.name, Status.DONE.name]))
priority = fields.Int(required=True, validate=Range(min=1))
deadlineness = fields.Boolean(required=True)
deadline = fields.DateTime()
@validates_schema
def validate_deadline(self, data, **kwargs):
errors = {}
if data["deadlineness"] is True and "deadline" not in data:
errors["deadline"] = ["deadline is required."]
if errors:
raise ValidationError(errors)
@post_load
def make_task(self, data, **kwargs):
return Task(**data)
class QueryTaskInputSchema(Schema):
page = fields.Int(validate=Range(min=1), missing=1)
per = fields.Int(validate=Range(min=1), missing=20)
sort = fields.Str(missing="status")
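# Hedged usage sketch (added; not part of models.py): how the schemas above
# would typically be wired into a Flask view. The blueprint name and the
# /tasks route are illustrative assumptions; Task, TaskSchema,
# CreateTaskInputSchema, ValidationError and db come from this module.
from flask import Blueprint, jsonify, request

task_bp = Blueprint("task", __name__)

@task_bp.route("/tasks", methods=["POST"])
def create_task():
    try:
        # post_load in CreateTaskInputSchema returns a Task instance
        task = CreateTaskInputSchema().load(request.get_json())
    except ValidationError as err:
        return jsonify(err.messages), 400
    db.session.add(task)
    db.session.commit()
    return jsonify(TaskSchema().dump(task)), 201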
| 2.265625
| 2
|
inventarios/forms.py
|
angiealejo/CoreM
| 1
|
12782254
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Django:
from django.forms import ModelForm
from django.forms import TextInput
from django.forms import Select
# from django.forms import SelectMultiple
from django.forms import ChoiceField
from django.forms import Textarea
from django.forms import CharField
from django.forms import Form
from django.forms import URLInput
# Models:
from .models import Almacen
from .models import Articulo
from .models import UdmArticulo
from .models import MovimientoCabecera
# from .models import MovimientoDetalle
from .models import MOVIMIENTO_ESTADO
from .models import MOVIMIENTO_CLASIFICACION
from .models import MOVIMIENTO_TIPO
# from .models import SeccionAlmacen
from trabajos.models import OrdenTrabajo
from seguridad.models import Profile
ALMACEN_ESTADO = (
('ACT', 'ACTIVO'),
('DES', 'DESHABILITADO'),
)
# ----------------- ALMACEN ----------------- #
class AlmacenForm(ModelForm):
class Meta:
model = Almacen
fields = [
'clave',
'descripcion',
'estado',
]
widgets = {
'clave': TextInput(attrs={'class': 'form-control input-sm'}),
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'estado': Select(attrs={'class': 'form-control input-sm'}),
}
# ----------------- UDM ARTICULO ----------------- #
class UdmArticuloForm(ModelForm):
class Meta:
model = UdmArticulo
fields = '__all__'
widgets = {
'clave': TextInput(attrs={'class': 'form-control input-sm'}),
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
}
# ----------------- ARTICULO ----------------- #
class ArticuloFilterForm(ModelForm):
class Meta:
model = Articulo
fields = [
'clave',
'descripcion',
'tipo',
'clave_jde',
'estado',
'imagen',
'marca',
'modelo',
'numero_parte',
]
widgets = {
'clave': TextInput(attrs={'class': 'form-control input-sm'}),
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'tipo': Select(attrs={'class': 'form-control input-sm'}),
'clave_jde': TextInput(attrs={'class': 'form-control input-sm'}),
}
class ArticuloForm(ModelForm):
class Meta:
model = Articulo
fields = [
'clave',
'descripcion',
'tipo',
'udm',
'observaciones',
'url',
'marca',
'modelo',
'numero_parte',
'stock_seguridad',
'stock_minimo',
'stock_maximo',
'clave_jde',
'estado',
'imagen',
]
widgets = {
'clave': TextInput(attrs={'class': 'form-control input-sm'}),
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'tipo': Select(attrs={'class': 'form-control input-sm'}),
'udm': Select(attrs={'class': 'form-control input-sm'}),
'observaciones': Textarea(
attrs={'class': 'form-control input-sm'}),
'url': URLInput(attrs={'class': 'form-control input-sm', 'placeholder':'http://www.website.com'}),
'stock_seguridad': TextInput(
attrs={'class': 'form-control input-sm', 'type': 'number'}),
'stock_minimo': TextInput(
attrs={'class': 'form-control input-sm', 'type': 'number'}),
'stock_maximo': TextInput(
attrs={'class': 'form-control input-sm', 'type': 'number'}),
'clave_jde': TextInput(attrs={'class': 'form-control input-sm'}),
'estado': Select(attrs={'class': 'form-control input-sm'}),
'marca': TextInput(attrs={'class': 'form-control input-sm'}),
'modelo': TextInput(attrs={'class': 'form-control input-sm'}),
'numero_parte': TextInput(attrs={'class': 'form-control input-sm'}),
}
labels = {
'clave_jde': 'Clave JDE',
'stock_seguridad': 'Stock de Seguridad',
'numero_parte': 'No. Parte',
'stock_minimo': 'Stock Mínimo',
'stock_maximo': 'Stock Máximo',
'url': 'URL'
}
# ----------------- STOCK ----------------- #
class StockFilterForm(Form):
almacen = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
articulo = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
cantidad_menorque = CharField(
widget=TextInput(
attrs={'class': 'form-control input-sm'})
)
cantidad_mayorque = CharField(
widget=TextInput(
attrs={'class': 'form-control input-sm'})
)
def __init__(self, *args, **kwargs):
super(StockFilterForm, self).__init__(*args, **kwargs)
self.fields['articulo'].choices = self.obtener_Articulos()
self.fields['almacen'].choices = self.obtener_Almacenes()
def obtener_Articulos(self):
articulo = [('', 'Todos'), ]
registros = Articulo.objects.all()
for registro in registros:
if registro.clave is None:
clave = "-"
else:
clave = registro.clave
articulo.append(
(
registro.id,
"(%s) %s" % (clave, registro.descripcion)
)
)
return articulo
def obtener_Almacenes(self):
articulo = [('', 'Todos'), ]
registros = Almacen.objects.all()
for registro in registros:
articulo.append(
(
registro.id,
"(%s) %s" % (registro.clave, registro.descripcion)
)
)
return articulo
# ----------------- ENTRADAS ----------------- #
class EntradaSaldoFiltersForm(Form):
descripcion = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_destino = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm select2'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
def __init__(self, *args, **kwargs):
super(EntradaSaldoFiltersForm, self).__init__(*args, **kwargs)
self.fields['almacen_destino'].choices = self.get_Almacenes()
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
def get_Almacenes(self):
almacen_destino = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen_destino.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen_destino
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
class EntradaSaldoForm(ModelForm):
def __init__(self, *args, **kwargs):
super(EntradaSaldoForm, self).__init__(*args, **kwargs)
self.fields['almacen_destino'].required = True
class Meta:
model = MovimientoCabecera
fields = [
'descripcion',
'almacen_destino',
'fecha',
]
widgets = {
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'almacen_destino': Select(
attrs={
'class': 'form-control input-sm'
}
),
'fecha': TextInput(attrs={'class': 'form-control input-sm',
'data-date-format': 'yyyy-mm-dd'}),
}
class EntradaCompraFiltersForm(Form):
descripcion = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_destino = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
proveedor = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
persona_recibe = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
def __init__(self, *args, **kwargs):
super(EntradaCompraFiltersForm, self).__init__(*args, **kwargs)
self.fields['almacen_destino'].choices = self.get_Almacenes()
self.fields['persona_recibe'].choices = self.get_Profiles()
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
def get_Almacenes(self):
almacen_destino = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen_destino.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen_destino
def get_Profiles(self):
persona_recibe = [('', '-------')]
registros = Profile.objects.all()
for registro in registros:
persona_recibe.append(
(
registro.id,
registro.user.get_full_name()
)
)
return persona_recibe
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
class EntradaCompraForm(ModelForm):
def __init__(self, *args, **kwargs):
super(EntradaCompraForm, self).__init__(*args, **kwargs)
self.fields['almacen_destino'].required = True
class Meta:
model = MovimientoCabecera
fields = [
'descripcion',
'fecha',
'almacen_destino',
'proveedor',
'persona_recibe',
]
widgets = {
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'almacen_destino': Select(
attrs={'class': 'form-control input-sm'}
),
'fecha': TextInput(
attrs={
'class': 'form-control input-sm',
'data-date-format': 'yyyy-mm-dd'
}
),
'proveedor': TextInput(attrs={'class': 'form-control input-sm'}),
'persona_recibe': Select(
attrs={'class': 'form-control input-sm'}
),
}
class EntradaAjusteFiltersForm(Form):
descripcion = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_destino = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
def __init__(self, *args, **kwargs):
super(EntradaAjusteFiltersForm, self).__init__(*args, **kwargs)
self.fields['almacen_destino'].choices = self.get_Almacenes()
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
def get_Almacenes(self):
almacen_destino = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen_destino.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen_destino
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
class EntradaAjusteForm(ModelForm):
def __init__(self, *args, **kwargs):
super(EntradaAjusteForm, self).__init__(*args, **kwargs)
self.fields['almacen_destino'].required = True
class Meta:
model = MovimientoCabecera
fields = [
'descripcion',
'almacen_destino',
'fecha',
]
widgets = {
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'almacen_destino': Select(
attrs={
'class': 'form-control input-sm'
}
),
'fecha': TextInput(attrs={'class': 'form-control input-sm',
'data-date-format': 'yyyy-mm-dd'}),
}
class EntradaTraspasoFiltersForm(Form):
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
descripcion = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_origen = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
almacen_destino = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
persona_entrega = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
persona_recibe = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
def __init__(self, *args, **kwargs):
super(EntradaTraspasoFiltersForm, self).__init__(*args, **kwargs)
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
self.fields['almacen_origen'].choices = self.get_Almacenes()
self.fields['almacen_destino'].choices = self.get_Almacenes()
self.fields['persona_entrega'].choices = self.get_Profiles()
self.fields['persona_recibe'].choices = self.get_Profiles()
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
def get_Almacenes(self):
almacen = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen
def get_Profiles(self):
persona = [('', '-------')]
registros = Profile.objects.all()
for registro in registros:
persona.append(
(
registro.id,
registro.user.get_full_name()
)
)
return persona
# ----------------- MOVIMIENTOS ----------------- #
class InventarioFiltersForm(Form):
tipo = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
descripcion_encabezado = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_destino = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm select2'}
)
)
almacen_origen = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm select2'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
proveedor = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
persona_recibe = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm select2'}
)
)
persona_entrega = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm select2'}
)
)
articulo = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm select2'}
)
)
orden_trabajo = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm select2'}
)
)
clasificacion = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm select2'}
)
)
def __init__(self, *args, **kwargs):
super(InventarioFiltersForm, self).__init__(*args, **kwargs)
self.fields['tipo'].choices = self.get_Tipo(MOVIMIENTO_TIPO)
self.fields['almacen_destino'].choices = self.get_Almacenes()
self.fields['almacen_origen'].choices = self.get_Almacenes()
self.fields['persona_entrega'].choices = self.get_Profiles()
self.fields['persona_recibe'].choices = self.get_Profiles()
self.fields['orden_trabajo'].choices = self.get_Ordenes()
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
self.fields['clasificacion'].choices = self.get_Clasificacion(
MOVIMIENTO_CLASIFICACION)
self.fields['articulo'].choices = self.get_Articulos()
def get_Tipo(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
def get_Almacenes(self):
almacen_destino = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen_destino.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen_destino
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
def get_Profiles(self):
persona = [('', '-------')]
registros = Profile.objects.all()
for registro in registros:
persona.append(
(
registro.id,
registro.user.get_full_name()
)
)
return persona
def get_Ordenes(self):
orden_trabajo = [('', '-------')]
registros = OrdenTrabajo.objects.all()
for registro in registros:
value = "(%s) %s" % (registro.equipo, registro.descripcion)
orden_trabajo.append(
(
registro.id,
value
)
)
return orden_trabajo
def get_Clasificacion(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
def get_Articulos(self):
articulo = [('', '-------')]
registros = Articulo.objects.all()
for registro in registros:
if registro.clave is None:
clave = "-"
else:
clave = registro.clave
articulo.append(
(
registro.id,
"(%s) %s" % (clave, registro.descripcion)
)
)
return articulo
class InventarioForm(ModelForm):
class Meta:
model = MovimientoCabecera
fields = [
'tipo',
'clasificacion',
'descripcion',
'almacen_origen',
'almacen_destino',
'fecha',
'persona_recibe',
'persona_entrega',
'proveedor'
]
widgets = {
'tipo': Select(attrs={'class': 'form-control input-sm'}),
'clasificacion': Select(
attrs={
'class': 'form-control input-sm'
}
),
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'almacen_origen': Select(
attrs={
'class': 'form-control input-sm'
}
),
'almacen_destino': Select(
attrs={
'class': 'form-control input-sm'
}
),
'fecha': TextInput(attrs={'class': 'form-control input-sm',
'data-date-format': 'yyyy-mm-dd'}),
'persona_recibe': Select(
attrs={
'class': 'form-control input-sm'
}
),
'persona_entrega': Select(
attrs={
'class': 'form-control input-sm'
}
),
'proveedor': TextInput(
attrs={
'class': 'form-control input-sm'
}
),
}
labels = {
'clasificacion': 'Clasificación',
'descripcion': 'Descripción',
}
# ------------------------ SALIDAS -------------------------- #
class SalidaPersonalFiltersForm(Form):
descripcion = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_origen = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
persona_entrega = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
persona_recibe = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
def __init__(self, *args, **kwargs):
super(SalidaPersonalFiltersForm, self).__init__(*args, **kwargs)
self.fields['almacen_origen'].choices = self.get_Almacenes()
self.fields['persona_recibe'].choices = self.get_Profiles()
self.fields['persona_entrega'].choices = self.get_Profiles()
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
def get_Almacenes(self):
almacen_destino = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen_destino.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen_destino
def get_Profiles(self):
persona_recibe = [('', '-------')]
registros = Profile.objects.all()
for registro in registros:
persona_recibe.append(
(
registro.id,
registro.user.get_full_name()
)
)
return persona_recibe
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
class SalidaPersonalForm(ModelForm):
def __init__(self, *args, **kwargs):
super(SalidaPersonalForm, self).__init__(*args, **kwargs)
self.fields['almacen_origen'].required = True
self.fields['persona_entrega'].required = True
self.fields['persona_recibe'].required = True
class Meta:
model = MovimientoCabecera
fields = [
'descripcion',
'fecha',
'almacen_origen',
'persona_entrega',
'persona_recibe',
]
widgets = {
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'almacen_origen': Select(attrs={'class': 'form-control input-sm'}),
'fecha': TextInput(attrs={'class': 'form-control input-sm',
'data-date-format': 'yyyy-mm-dd'}),
'persona_entrega': Select(
attrs={
'class': 'form-control input-sm'
}
),
'persona_recibe': Select(attrs={'class': 'form-control input-sm'}),
}
class SalidaOrdenTrabajoFiltersForm(Form):
descripcion = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_origen = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
persona_entrega = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
persona_recibe = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
orden_trabajo = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
def __init__(self, *args, **kwargs):
super(SalidaOrdenTrabajoFiltersForm, self).__init__(*args, **kwargs)
self.fields['almacen_origen'].choices = self.get_Almacenes()
self.fields['persona_recibe'].choices = self.get_Profiles()
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
self.fields['orden_trabajo'].choices = self.get_Ordenes()
def get_Almacenes(self):
almacen_destino = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen_destino.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen_destino
def get_Profiles(self):
persona_recibe = [('', '-------')]
registros = Profile.objects.all()
for registro in registros:
persona_recibe.append(
(
registro.id,
registro.user.get_full_name()
)
)
return persona_recibe
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
def get_Ordenes(self):
orden_trabajo = [('', '-------')]
registros = OrdenTrabajo.objects.all()
for registro in registros:
orden_trabajo.append(
(
registro.id,
"(%s) %s" % (registro.id, registro.descripcion)
)
)
return orden_trabajo
class SalidaOrdenTrabajoForm(ModelForm):
def __init__(self, *args, **kwargs):
super(SalidaOrdenTrabajoForm, self).__init__(*args, **kwargs)
self.fields['orden_trabajo'].required = True
self.fields['almacen_origen'].required = True
self.fields['persona_entrega'].required = True
self.fields['persona_recibe'].required = True
class Meta:
model = MovimientoCabecera
fields = [
'descripcion',
'fecha',
'almacen_origen',
'persona_entrega',
'persona_recibe',
'orden_trabajo',
]
widgets = {
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'almacen_origen': Select(attrs={'class': 'form-control input-sm'}),
'fecha': TextInput(attrs={'class': 'form-control input-sm',
'data-date-format': 'yyyy-mm-dd'}),
'persona_entrega': Select(
attrs={
'class': 'form-control input-sm'
}
),
'persona_recibe': Select(attrs={'class': 'form-control input-sm'}),
'orden_trabajo': Select(attrs={'class': 'form-control input-sm'}),
}
class SalidaAjusteFiltersForm(Form):
descripcion = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_origen = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
def __init__(self, *args, **kwargs):
super(SalidaAjusteFiltersForm, self).__init__(*args, **kwargs)
self.fields['almacen_origen'].choices = self.get_Almacenes()
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
def get_Almacenes(self):
almacen_destino = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen_destino.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen_destino
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
class SalidaAjusteForm(ModelForm):
def __init__(self, *args, **kwargs):
super(SalidaAjusteForm, self).__init__(*args, **kwargs)
self.fields['almacen_origen'].required = True
class Meta:
model = MovimientoCabecera
fields = [
'descripcion',
'almacen_origen',
'fecha',
]
widgets = {
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'almacen_origen': Select(attrs={'class': 'form-control input-sm'}),
'fecha': TextInput(attrs={'class': 'form-control input-sm',
'data-date-format': 'yyyy-mm-dd'}),
}
class SalidaTraspasoFiltersForm(Form):
descripcion = CharField(
widget=TextInput(attrs={'class': 'form-control input-sm'})
)
almacen_origen = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
almacen_destino = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
fecha_inicio = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
fecha_fin = CharField(
widget=TextInput(attrs={'class': 'form-control pull-right input-sm',
'data-date-format': 'yyyy-mm-dd'})
)
persona_entrega = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
persona_recibe = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
estado = ChoiceField(
widget=Select(
attrs={'class': 'form-control input-sm'}
)
)
def __init__(self, *args, **kwargs):
super(SalidaTraspasoFiltersForm, self).__init__(*args, **kwargs)
self.fields['almacen_origen'].choices = self.get_Almacenes()
self.fields['almacen_destino'].choices = self.get_Almacenes()
self.fields['persona_entrega'].choices = self.get_Profiles()
self.fields['persona_recibe'].choices = self.get_Profiles()
self.fields['estado'].choices = self.get_Estados(MOVIMIENTO_ESTADO)
def get_Almacenes(self):
almacen = [('', '-------')]
registros = Almacen.objects.all()
for registro in registros:
almacen.append(
(
registro.id,
"%s" % (registro.descripcion)
)
)
return almacen
def get_Estados(self, _opciones):
opciones = [('', '-------')]
for registro in _opciones:
opciones.append(registro)
return opciones
def get_Profiles(self):
persona = [('', '-------')]
registros = Profile.objects.all()
for registro in registros:
persona.append(
(
registro.id,
registro.user.get_full_name()
)
)
return persona
class SalidaTraspasoForm(ModelForm):
def __init__(self, *args, **kwargs):
super(SalidaTraspasoForm, self).__init__(*args, **kwargs)
self.fields['almacen_origen'].required = True
self.fields['almacen_destino'].required = True
class Meta:
model = MovimientoCabecera
fields = [
'descripcion',
'almacen_origen',
'almacen_destino',
'persona_entrega',
'persona_recibe',
'fecha',
]
widgets = {
'descripcion': TextInput(attrs={'class': 'form-control input-sm'}),
'almacen_origen': Select(attrs={'class': 'form-control input-sm'}),
'almacen_destino': Select(
attrs={
'class': 'form-control input-sm'
}),
'persona_entrega': Select(
attrs={
'class': 'form-control input-sm'
}),
'persona_recibe': Select(attrs={'class': 'form-control input-sm'}),
'fecha': TextInput(
attrs={
'class': 'form-control input-sm',
'data-date-format': 'yyyy-mm-dd'
})
}
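# Hedged refactoring sketch (added; not part of forms.py): the get_Almacenes /
# get_Profiles / get_Estados helpers are repeated almost verbatim in every
# filter form above. A small mixin could supply them once; the name
# ChoicesMixin is an illustrative assumption.
class ChoicesMixin:
    def get_Almacenes(self):
        return [('', '-------')] + [
            (registro.id, registro.descripcion)
            for registro in Almacen.objects.all()
        ]

    def get_Profiles(self):
        return [('', '-------')] + [
            (registro.id, registro.user.get_full_name())
            for registro in Profile.objects.all()
        ]

    def get_Estados(self, _opciones):
        return [('', '-------')] + list(_opciones)

# e.g. class SalidaAjusteFiltersForm(ChoicesMixin, Form): ... would then reuse them.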
| 1.9375
| 2
|
test.py
|
Shito0907/cat_gen
| 1
|
12782255
|
import yaml
import argparse
from attrdict import AttrDict
from matplotlib import pyplot as plt
import torch
from torch.autograd import Variable
from models.generator import Generator
def test(params):
G = Generator(params.network.generator)
if params.restore.G:
G.load_state_dict(torch.load(params.restore.G))
gen_input = \
Variable(torch.FloatTensor(
1, params.network.generator.z_size,
1, 1
).normal_(0, 1))
torch_cat = G(gen_input)
np_cat = torch_cat.data.numpy()[0] / 2.0 + 0.5
np_cat = np_cat.transpose((1, 2, 0))
fig = plt.gcf()
fig.canvas.set_window_title('Random cat')
plt.imshow(np_cat)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='GAN testing script'
)
parser.add_argument('--conf', '-c', required=True,
help='a path to the configuration file')
args = parser.parse_args()
with open(args.conf, 'r') as stream:
try:
args = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
test(AttrDict(args))
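# Hedged sketch (added; not from the repo): the nested structure that test()
# above expects from the YAML config, i.e. params.network.generator.z_size and
# params.restore.G. The concrete values below are placeholders.
example_conf = {
    'network': {
        'generator': {
            'z_size': 100,  # placeholder latent vector size
            # ... plus whatever other settings Generator() reads
        },
    },
    'restore': {
        'G': 'checkpoints/generator.pth',  # placeholder path to saved weights, or None
    },
}
# test(AttrDict(example_conf)) would then sample one latent vector and plot the result.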
| 2.1875
| 2
|
scrapers/astro_assigner.py
|
nseifert/splatalogue
| 0
|
12782256
|
<reponame>nseifert/splatalogue<filename>scrapers/astro_assigner.py
# -*- coding: utf-8 -*-
import pandas as pd
import MySQLdb as mysqldb
from MySQLdb import cursors
import numpy as np
import re
import easygui as eg
from tqdm import tqdm, tqdm_pandas
def init_sql_db():
def rd_pass():
return open('pass.pass','r').read()
HOST = "127.0.0.1"
LOGIN = "nseifert"
PASS = rd_pass()
db = mysqldb.connect(host=HOST, user=LOGIN, passwd=PASS.strip(), port=3307, cursorclass=cursors.SSCursor)
db.autocommit(False)
print('MySQL Login Successful.')
return db
def calc_rough_mass(formula):
# Look-up table for common elements:
masses = {'H': 1.0, 'D': 2.0, 'He': 4.0,
'B': 10.8, 'C': 12.0, 'N': 14.0, 'O': 16.0, 'F': 19.0,
'Na': 23.0, 'Mg': 24.3, 'Al': 27.0, 'Si': 28.0, 'P': 31.0, 'S': 32.0, 'Cl': 35.0,
'K': 39.0, 'Ti': 48.0, 'Fe': 56.0
}
mass = 0.0
for entry in re.findall(r'([A-Z][a-z]*)(\d*)', formula):
try:
ele_mass = masses[entry[0]]
if entry[1] != '':
ele_mass *= int(entry[1])
mass += ele_mass
except KeyError:
continue
return int(mass)
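# Worked example (added for illustration): for "C2H5OH" the regex yields
# ('C', '2'), ('H', '5'), ('O', ''), ('H', ''), so the rough mass is
# 2*12 + 5*1 + 16 + 1 = 46 and calc_rough_mass("C2H5OH") returns 46.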
def read_raw_file(inp, fmt, delimiter, tag, skiprows=0):
linelist = {}
for key in fmt:
linelist[key] = []
for i, line in enumerate(inp):
if i <= skiprows-1:
continue
if line.split() is None:
continue
else:
temp = line.decode('unicode_escape').encode('ascii', 'ignore') # Gets rid of Unicode escape characters
if tag == 'shimajiri':
line_elements = {}
# Sanitize formulas
line_elements['El'] = temp.split()[0]
# Pull upper quantum number
m = re.search(r'\((.*?)\)', temp)
line_elements['qNu'] = re.findall(r'\d+', m.group(1))[0]
# Pull frequency
line_elements['Freq'] = float(re.sub(r'\(.*?\)', '', temp).split()[1])*1000
for key in fmt:
linelist[key].append(line_elements[key])
return pd.DataFrame.from_dict(linelist)
def read_vizier_file(inp, fmt, delimiter):
# Construct linelist result dictionary
linelist = {}
for key in fmt:
linelist[key] = []
atLineList = False
for line in inp:
if not line.strip(): # Blank line
continue
if line[0] == "#": # Comment
continue
if '--' in line: # Last line before linelist starts
atLineList = True
continue
if atLineList:
try:
for i, key in enumerate(fmt):
if len(line.strip().split(delimiter)) != len(fmt):
continue
else:
linelist[key].append(line.strip().split(delimiter)[i])
except IndexError:
print "\"" + line + "\""
raise
linelist['Freq'] = [float(f) for f in linelist['Freq']] # Convert from str to float
return pd.DataFrame.from_dict(linelist)
def push_raw_to_splat(astro_ll, meta, db, fuzzy_search=0, use_qn_mult=1, use_qn_sing=0, freq_tol=1.0, mass_tol=4.0, verbose=0):
if verbose:
filename = open('output.txt', 'w')
if not fuzzy_search:
species_id_global = {}
for idx, row in tqdm(astro_ll.iterrows(), total=astro_ll.shape[0]):
curs2 = db.cursor()
cmd = "SELECT line_id, orderedfreq, transition_in_space, species_id, quantum_numbers FROM main " \
"WHERE Lovas_NRAO = 1 AND (orderedfreq <= %s AND orderedfreq >= %s)"\
% (row['Freq'] + freq_tol, row['Freq'] - freq_tol)
curs2.execute(cmd)
res = curs2.fetchall()
num_results = len(res)
if not fuzzy_search:
if row['El'] not in species_id_global.keys():
species_id_lookup = {}
for rrow in res:
t_cursor = db.cursor()
cmd = "SELECT SPLAT_ID, chemical_name, s_name FROM species where species_id = %s" % rrow[3]
t_cursor.execute(cmd)
species_id_lookup[rrow[3]] = t_cursor.fetchall()
t_cursor.close()
if len(species_id_lookup.keys()) == 1:
species_id_global[row['El']] = list(species_id_lookup.keys())[0]
else:
selections = [str(k) + '\t' + '\t'.join([str(x) for x in v]) for k, v in species_id_lookup.items()]
choice = eg.choicebox(msg='Multiple results for entry %s. Pick the matching splat entry.' % row['El'], choices=selections)
species_id_global[row['El']] = choice.split()[0]
selected_transitions = []
overlap_trans = False
updated_species_ids = set()
if num_results > 0:
for sql_row in res:
t_cursor = db.cursor()
cmdd = "SELECT SPLAT_ID FROM species WHERE species_id = %s" % sql_row[3]
t_cursor.execute(cmdd)
splat_id = t_cursor.fetchall()[0][0]
splat_mass = int(splat_id[:-2].lstrip("0"))
if verbose:
filename.write('\t'.join([str(splat_id), str(splat_mass), str(row['rough_mass'])])+"\n")
if str(sql_row[2]) == "1": # Transition already labeled
if verbose:
filename.write('Transition found for %s for splat_id %s\n' %(row['El'], splat_id))
continue
if np.abs(splat_mass - row['rough_mass']) <= mass_tol:
if num_results > 1:
if use_qn_mult:
if row['qNu'].split()[0] == sql_row[-1].split()[0]:
selected_transitions.append(sql_row)
updated_species_ids.add(sql_row[3])
elif not fuzzy_search:
if str(species_id_global[row['El']]) == str(sql_row[3]):
selected_transitions.append(sql_row)
updated_species_ids.add(sql_row[3])
if num_results == 1:
selected_transitions.append(sql_row)
updated_species_ids.add(sql_row[3])
t_cursor.close()
if len(selected_transitions) > 0: # Push updates to main
overlap_trans = True
for trans in selected_transitions:
curs2.execute("UPDATE main SET transition_in_space=1, source_Lovas_NIST=\"%s\", telescope_Lovas_NIST=\"%s\", obsref_Lovas_NIST=\"%s\" WHERE line_id = %s"
% (meta['source'], meta['tele'], meta['ref_short'], trans[0]))
if verbose:
filename.write('Frequency: %s \t # Results Raw: %i \t Selected Results: %i\n'
% (row['Freq'], num_results, len(selected_transitions)))
if len(selected_transitions) != 0:
filename.write('--------------\n')
for sel_row in selected_transitions:
filename.write('\t\t Line: %s \t Species ID: %s \t Splat Freq: %s\n\n'
% (sel_row[0], sel_row[2], sel_row[1]))
else:
filename.write('--------------\n')
filename.write('No lines found. Species: %s \t Formula: %s \t Rough Mass: %s \n' \
% (row['El'],row['El_parse'], row['rough_mass']))
# Update metadata for species that were updated
for species in updated_species_ids:
curs2.execute("SELECT Ref19, Date from species_metadata where species_id=%s ORDER BY Date DESC" % species)
try:
ref_data = curs2.fetchall()[0]
except IndexError: # Bad species_id?
print('Bad ref data for species id # %s: ' % species)
continue
if ref_data[0] is None or ref_data[0] == '':
to_write = "Astronomically observed transitions for this linelist have been marked using data from" \
" the following references"
if overlap_trans:
to_write += " (NOTE: Some transitions in the linelist " \
"are overlapping at typical astronomical linewidths." \
" All transitions within this typical tolerance have been marked as observed.)"
to_write += ": %s" % meta['ref_full']
else:
continue
# to_write = ref_data[0] + "; %s" % meta['ref_full']
curs2.execute("UPDATE species_metadata SET Ref19 = \"%s\" WHERE species_id=%s AND Date = \"%s\""
% (to_write, species, ref_data[1]))
curs2.close()
if verbose:
filename.close()
# Update linelist list with ref
# curs3 = db.cursor()
# curs3.execute("INSERT INTO lovas_references (Lovas_shortref, Lovas_fullref) VALUES (\"%s\", \"%s\")" %(meta['ref_short'], meta['ref_full']))
print('Update completed successfully.')
def push_vizier_to_splat(astro_ll, meta, db, use_qn_mult=1, use_qn_sing=0, freq_tol=1.0, mass_tol=4, verbose=0):
if verbose:
filename = open('output.txt', 'w')
for idx, row in tqdm(astro_ll.iterrows(), total=astro_ll.shape[0]):
curs2 = db.cursor()
cmd = "SELECT line_id, orderedfreq, transition_in_space, species_id, quantum_numbers FROM main " \
"WHERE Lovas_NRAO = 1 AND (orderedfreq <= %s AND orderedfreq >= %s)"\
% (row['Freq'] + freq_tol, row['Freq'] - freq_tol)
curs2.execute(cmd)
res = curs2.fetchall()
num_results = len(res)
selected_transitions = []
overlap_trans = False
updated_species_ids = set()
if num_results > 0:
for sql_row in res:
curs2.execute("SELECT SPLAT_ID FROM species WHERE species_id = %s" % sql_row[3])
splat_id = curs2.fetchall()[0][0]
splat_mass = int(splat_id[:-2].lstrip("0"))
if verbose:
filename.write('\t'.join([str(splat_id), str(splat_mass), str(row['rough_mass'])])+"\n")
if sql_row[2] == "1" or sql_row[2] == 1: # Transition already labeled
continue
if np.abs(splat_mass - row['rough_mass']) <= mass_tol:
if num_results > 1:
if use_qn_mult:
if row['qNu'].split()[0] == sql_row[-1].split()[0]:
selected_transitions.append(sql_row)
updated_species_ids.add(sql_row[3])
else:
selected_transitions.append(sql_row)
updated_species_ids.add(sql_row[3])
if num_results == 1:
if use_qn_sing:
if row['qNu'].split()[0] == sql_row[-1].split()[0]:
selected_transitions.append(sql_row)
updated_species_ids.add(sql_row[3])
else:
selected_transitions.append(sql_row)
updated_species_ids.add(sql_row[3])
if len(selected_transitions) > 0: # Push updates to main
overlap_trans = True
for trans in selected_transitions:
curs2.execute("UPDATE main SET transition_in_space=1, source_Lovas_NIST=\"%s\", telescope_Lovas_NIST=\"%s\", obsref_Lovas_NIST=\"%s\" WHERE line_id = %s"
% (meta['source'], meta['tele'], meta['ref_short'], trans[0]))
if verbose:
filename.write('Frequency: %s \t # Results Raw: %i \t Selected Results: %i\n'
% (row['Freq'], num_results, len(selected_transitions)))
if len(selected_transitions) != 0:
filename.write('--------------\n')
for sel_row in selected_transitions:
filename.write('\t\t Line: %s \t Species ID: %s \t Splat Freq: %s\n\n'
% (sel_row[0], sel_row[2], sel_row[1]))
else:
filename.write('--------------\n')
filename.write('No lines found. Species: %s \t Formula: %s \t Rough Mass: %s \n' \
% (row['El'],row['El_parse'], row['rough_mass']))
# Update metadata for species that were updated
for species in updated_species_ids:
curs2.execute("SELECT Ref19, Date from species_metadata where species_id=%s ORDER BY Date DESC" % species)
try:
ref_data = curs2.fetchall()[0]
except IndexError: # Bad species_id?
print('Bad ref data for species id # %s: ' % species)
continue
if ref_data[0] is None or ref_data[0] == '':
to_write = "Astronomically observed transitions for this linelist have been marked using data from" \
" the following references"
if overlap_trans:
to_write += " (NOTE: Some transitions in the linelist " \
"are overlapping at typical astronomical linewidths." \
" All transitions within this typical tolerance have been marked as observed.)"
to_write += ": %s" % meta['ref_full']
else:
continue
# to_write = ref_data[0] + "; %s" % meta['ref_full']
curs2.execute("UPDATE species_metadata SET Ref19 = \"%s\" WHERE species_id=%s AND Date = \"%s\""
% (to_write, species, ref_data[1]))
curs2.close()
if verbose:
filename.close()
# Update linelist list with ref
curs3 = db.cursor()
curs3.execute("INSERT INTO lovas_references (Lovas_shortref, Lovas_fullref) VALUES (\"%s\", \"%s\")" %(meta['ref_short'], meta['ref_full']))
print('Update completed successfully.')
if __name__ == "__main__":
path = "/home/nate/Downloads/Line Survey/Shimajiri 2015/FIR_3N_raw.txt"
fmt = ['El', 'qNu', 'Freq']
TOLERANCE = 1.0 # In units of linelist frequency, typically MHz.
linelist = read_raw_file(open(path, 'r'), fmt, ' ', tag='shimajiri')
# print(linelist)
# linelist = read_vizier_file(open(path, 'r'), fmt, '\t')
# linelist['El'] = linelist['El'].apply(lambda x: x.replace('_','')).apply(lambda x: re.sub('\\^.*?\\^', '', x)).apply(lambda x: x.strip())
# linelist['qNu'] = linelist['qNu'].apply(lambda x: re.findall(r'\d+', x)[0])
def parse_formula(row):
return ''.join([x[0] if x[1] == '' else x[0]+x[1]
for x in re.findall(r'([A-Z][a-z]*)(\d*)', row)])
def sanitize_formula(form):
formula_chars_to_rid = ['+', '13', '18', '15', '17']
for val in formula_chars_to_rid:
form = form.replace(val, '')
return form
linelist['El_parse'] = linelist['El'].apply(parse_formula).apply(sanitize_formula)
linelist['rough_mass'] = linelist['El_parse'].apply(calc_rough_mass)
print(linelist)
db = init_sql_db()
print('Connected to database successfully.')
cursor = db.cursor()
cursor.execute("USE splat")
# Enter metadata for astronomical study
fields = ['Telescope', 'Source', 'Full Reference', 'Reference Abbrev.']
# fieldValues = eg.multenterbox(msg="Enter metadata for astro survey.", title="Survey Metadata", fields=fields)
# metadata = {'tele': fieldValues[0], 'source': fieldValues[1],
# 'ref_full': fieldValues[2], 'ref_short': fieldValues[3]}
metadata = {'tele': 'ATSE', 'source': 'OMC 2-FIR-3N',
'ref_full': '<NAME>, <NAME>, <NAME>, <i>et. al</i>, <b>2015</b>, <i>ApJ. Suppl.</i> 221, 2.',
'ref_short': 'Shimajiri 2015'}
print('Pushing updates from %s, telescope: %s, source: %s...'
% (metadata['ref_short'], metadata['tele'], metadata['source']))
push_raw_to_splat(astro_ll=linelist, meta=metadata, db=db, verbose=0, fuzzy_search=0, use_qn_mult=1, mass_tol=3, freq_tol=2.0)
# push_vizier_to_splat(astro_ll=linelist, meta=metadata, db=db, use_qn_mult=1, mass_tol=4, freq_tol=1.0)
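# Hedged aside (added; not part of the original scraper): MySQLdb's execute()
# also accepts a parameter tuple, which avoids hand-interpolating values into
# the SQL strings used above. The frequency-window SELECT, for example, could
# be written as:
#   curs2.execute(
#       "SELECT line_id, orderedfreq, transition_in_space, species_id, quantum_numbers "
#       "FROM main WHERE Lovas_NRAO = 1 AND orderedfreq <= %s AND orderedfreq >= %s",
#       (row['Freq'] + freq_tol, row['Freq'] - freq_tol))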
| 2.328125
| 2
|
site_scons/upload_thirdparty.py
|
neam/TideSDK
| 10
|
12782257
|
<gh_stars>1-10
# This file has been modified from its orginal sources.
#
# Copyright (c) 2012 Software in the Public Interest Inc (SPI)
# Copyright (c) 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2008-2012 Appcelerator Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from progressbar import ProgressBar
import sys
import os
import time
acctid = None
secret = None
if len(sys.argv) < 2:
print "Usage: upload_thirdparty.py <file.tgz> [<access key> <secret key>]"
exit()
else:
fname = sys.argv[1]
if len(sys.argv) >= 4:
acctid = sys.argv[2]
secret = sys.argv[3]
if acctid is None:
acctid = input("AWS_ACCESS_KEY_ID: ").strip()
if secret is None:
secret = input("AWS_SECRET_ACCESS_KEY: ").strip()
bucket = "kroll.appcelerator.com"
key = os.path.basename(fname)
conn = S3Connection(acctid, secret)
bucket = conn.get_bucket(bucket)
k = bucket.new_key(key)
pbar = ProgressBar().start()
try:
def progress_callback(current, total):
pbar.update(int(100 * (float(current) / float(total))))
k.set_contents_from_filename(fname, cb=progress_callback, num_cb=100, policy='public-read')
finally:
pbar.finish()
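# Hedged modernization sketch (added; not part of this script): the same upload
# with boto3, which superseded the classic boto library used above. boto3's
# Callback receives the bytes sent per chunk, so progress has to be accumulated;
# fname, bucket name, key and credentials reuse the variables defined above,
# and os is already imported in this file.
import boto3

def upload_with_boto3(fname, bucket_name, key, acctid, secret):
    s3 = boto3.client('s3', aws_access_key_id=acctid, aws_secret_access_key=secret)
    total = os.path.getsize(fname)
    sent = {'bytes': 0}

    def progress(bytes_amount):
        sent['bytes'] += bytes_amount
        print('%.1f%%' % (100.0 * sent['bytes'] / total))

    s3.upload_file(fname, bucket_name, key,
                   ExtraArgs={'ACL': 'public-read'}, Callback=progress)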
| 1.882813
| 2
|
tests/casefiles/toplevel_extracode.py
|
ardovm/wxGlade
| 225
|
12782258
|
<reponame>ardovm/wxGlade
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# frame extra code
# dialog extra code
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
# frame extra code before
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((400, 300))
self.SetTitle("frame")
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add((0, 0), 0, 0, 0)
self.SetSizer(sizer_1)
self.Layout()
# frame extra code after
self.Bind(wx.EVT_CLOSE, self.on_close_frame, self)
self.Bind(wx.EVT_MENU_CLOSE, self.on_menu_close_frame, self)
# end wxGlade
def on_close_frame(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_close_frame' not implemented!")
event.Skip()
def on_menu_close_frame(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_menu_close_frame' not implemented!")
event.Skip()
# end of class MyFrame
class MyDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: MyDialog.__init__
# dialog extra code before
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.SetTitle("dialog")
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add((0, 0), 0, 0, 0)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
# dialog extra code after
self.Bind(wx.EVT_CLOSE, self.on_close_dialog, self)
# end wxGlade
def on_close_dialog(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler 'on_close_dialog' not implemented!")
event.Skip()
# end of class MyDialog
class MyMenuBar(wx.MenuBar):
def __init__(self, *args, **kwds):
# begin wxGlade: MyMenuBar.__init__
# menubar extracode before
wx.MenuBar.__init__(self, *args, **kwds)
# menubar extracode after
# end wxGlade
# end of class MyMenuBar
class wxToolBar(wx.ToolBar):
def __init__(self, *args, **kwds):
# begin wxGlade: wxToolBar.__init__
# toolbar extracode before
kwds["style"] = kwds.get("style", 0)
wx.ToolBar.__init__(self, *args, **kwds)
self.Realize()
# toolbar extracode after
# end wxGlade
# end of class wxToolBar
class MyDialog1(wx.Panel):
def __init__(self, *args, **kwds):
# begin wxGlade: MyDialog1.__init__
# panel extracode before
kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add((0, 0), 0, 0, 0)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
# panel extracode after
# end wxGlade
# end of class MyDialog1
class MyApp(wx.App):
def OnInit(self):
self.frame = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(self.frame)
self.frame.Show()
return True
# end of class MyApp
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
| 2.21875
| 2
|
src/final_exam/q_employee/hourly_employee.py
|
acc-cosc-1336/cosc-1336-spring-2018-MJBrady13
| 0
|
12782259
|
from employee import Employee
class HourlyEmployee(Employee):
def __init__(self, hourly_rate, worked_hours, employee_id, name):
# assumption: the Employee base class takes (employee_id, name)
super().__init__(employee_id, name)
self.hourly_rate = hourly_rate
self.worked_hours = worked_hours
def calculate(self):
return self.hourly_rate * self.worked_hours
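# Usage sketch (added for illustration; assumes the constructor above):
#   emp = HourlyEmployee(15.0, 40, 101, "Ada")
#   emp.calculate()  # -> 600.0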
| 3.46875
| 3
|
algoritmo_genetico.py
|
higorsantana-omega/Algoritmo-Genetico-Python
| 0
|
12782260
|
<gh_stars>0
from random import random
class Produto():
def __init__(self, nome, espaco, valor):
self.nome = nome
self.espaco = espaco
self.valor = valor
class Individuo():
def __init__(self, espacos, valores, limite_espacos, geracao=0):
self.espacos = espacos
self.valores = valores
self.limites_espacos = limite_espacos
self.nota_avaliacao = 0
self.espaco_usado = 0
self.geracao = geracao
self.cromossomo = []
for i in range(len(espacos)):
if random() < 0.5:
self.cromossomo.append("0")
else:
self.cromossomo.append("1")
def avaliacao(self):
nota = 0
soma_espacos = 0
for i in range(len(self.cromossomo)):
if self.cromossomo[i] == '1':
nota += self.valores[i]
soma_espacos += self.espacos[i]
if soma_espacos > self.limites_espacos:
nota = 1
self.nota_avaliacao = nota
self.espaco_usado = soma_espacos
def crossover(self, outro_individuo):
corte = round(random() * len(self.cromossomo))
filho1 = outro_individuo.cromossomo[0:corte] + self.cromossomo[corte::]
filho2 = self.cromossomo[0:corte] + outro_individuo.cromossomo[corte::]
filhos = [Individuo(self.espacos, self.valores, self.limites_espacos, self.geracao + 1),
Individuo(self.espacos, self.valores, self.limites_espacos, self.geracao + 1)]
filhos[0].cromossomo = filho1
filhos[1].cromossomo = filho2
return filhos
if __name__ == "__main__":
# p1 = Produto("Iphone", 0.0000899, 2199.12)
lista_produtos = []
lista_produtos.append(Produto("Iphone", 0.0000899, 2199.12))
lista_produtos.append(Produto("Geladeira Dako", 0.751, 999.90))
lista_produtos.append(Produto("TV 55' ", 0.400, 4346.99))
lista_produtos.append(Produto("TV 50' ", 0.290, 3999.90))
lista_produtos.append(Produto("TV 42' ", 0.200, 2999.00))
lista_produtos.append(Produto("Notebook Dell", 0.00350, 2499.90))
lista_produtos.append(Produto("Ventilador Panasonic", 0.496, 199.90))
lista_produtos.append(Produto("Microondas Electrolux", 0.0424, 308.66))
lista_produtos.append(Produto("Microondas LG", 0.0544, 429.90))
lista_produtos.append(Produto("Microondas Panasonic", 0.0319, 299.29))
lista_produtos.append(Produto("Geladeira Brastemp", 0.635, 849.00))
lista_produtos.append(Produto("Geladeira Consul", 0.870, 1199.89))
lista_produtos.append(Produto("Notebook Lenovo", 0.498, 1999.90))
lista_produtos.append(Produto("Notebook Asus", 0.527, 3999.00))
# for produto in lista_produtos:
# print(produto.nome)
espacos = []
valores = []
nomes = []
for produto in lista_produtos:
espacos.append(produto.espaco)
valores.append(produto.valor)
nomes.append(produto.nome)
limite = 3
individuo1 = Individuo(espacos, valores, limite)
print("\nIndividuo 1")
for i in range(len(lista_produtos)):
if individuo1.cromossomo[i] == '1':
print(f"Nome: {lista_produtos[i].nome} R$ {lista_produtos[i].valor}")
individuo1.avaliacao()
print(f"Nota: {individuo1.nota_avaliacao}")
print(f"Espaço Usado: {individuo1.espaco_usado}")
individuo2 = Individuo(espacos, valores, limite)
print("\nIndividuo 2")
for i in range(len(lista_produtos)):
if individuo2.cromossomo[i] == '1':
print(f"Nome: {lista_produtos[i].nome} R$ {lista_produtos[i].valor}")
individuo2.avaliacao()
print(f"Nota: {individuo2.nota_avaliacao}")
print(f"Espaço Usado: {individuo2.espaco_usado}")
individuo1.crossover(individuo2)
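# Hedged continuation sketch (added; not in the original script): the crossover
# call above discards its offspring, so evaluating them shows the pieces working
# together. Variable names reuse the ones defined above.
filhos = individuo1.crossover(individuo2)
for n, filho in enumerate(filhos, start=1):
    filho.avaliacao()
    print(f"\nFilho {n} - Nota: {filho.nota_avaliacao}, Espaço Usado: {filho.espaco_usado}")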
| 3.0625
| 3
|
atcoder/abc/abc169/b.py
|
zaurus-yusya/atcoder
| 3
|
12782261
|
<gh_stars>1-10
a = int(input())
i = list(map(int, input().split()))
ans = 1
flag = 0
if i.count(0) > 0:
flag = 2
if flag == 0:
for x in range(len(i)):
ans = ans * i[x]
if ans > 1000000000000000000:
flag = 1
break
if flag == 1:
print(-1)
elif flag == 2:
print(0)
else:
print(ans)
| 2.546875
| 3
|
src/atcoder/abc032/b/sol_0.py
|
kagemeka/competitive-programming
| 1
|
12782262
|
import typing
def main() -> typing.NoReturn:
s = input()
k = int(input())
print(len(set(s[i:i + k] for i in range(len(s)- k + 1))))
main()
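# Worked example (added for illustration): for s = "abcabc" and k = 2 the
# length-2 substrings are ab, bc, ca, ab, bc, so the set holds 3 distinct
# values and the program prints 3.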
| 2.921875
| 3
|
world/gen/layer/DefaultLandMassLayer.py
|
uuk0/mcpython-4
| 2
|
12782263
|
"""mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
from world.gen.layer.Layer import Layer, LayerConfig
import globals as G
import random
import opensimplex
import world.Chunk
@G.worldgenerationhandler
class DefaultLandMassLayer(Layer):
noise1 = opensimplex.OpenSimplex(seed=random.randint(-10000, 10000))
noise2 = opensimplex.OpenSimplex(seed=random.randint(-10000, 10000))
noise3 = opensimplex.OpenSimplex(seed=random.randint(-10000, 10000))
@staticmethod
def normalize_config(config: LayerConfig):
if not hasattr(config, "masses"):
config.masses = ["land"]
# todo: add underwaterbiomes
if not hasattr(config, "size"):
config.size = 1
@staticmethod
def get_name() -> str:
return "landmass_default"
@staticmethod
def add_generate_functions_to_chunk(config: LayerConfig, chunk):
chunk.chunkgenerationtasks.append([DefaultLandMassLayer.generate_landmass, [chunk, config], {}])
@staticmethod
def generate_landmass(chunk, config):
cx, cz = chunk.position
landmap = chunk.get_value("landmassmap")
factor = 10**config.size
for x in range(cx*16, cx*16+16):
for z in range(cz*16, cz*16+16):
v = sum([DefaultLandMassLayer.noise1.noise2d(x/factor, z/factor) * 0.5 + 0.5,
DefaultLandMassLayer.noise2.noise2d(x/factor, z/factor) * 0.5 + 0.5,
DefaultLandMassLayer.noise3.noise2d(x/factor, z/factor) * 0.5 + 0.5]) / 3
v *= len(config.masses)
v = round(v)
if v == len(config.masses):
v = 0
landmap[(x, z)] = config.masses[v]
"""
if v < 0:
chunk.add_add_block_gen_task((x, 5, z), "minecraft:stone")
else:
chunk.add_add_block_gen_task((x, 5, z), "minecraft:dirt")
"""
authcode = world.Chunk.Chunk.add_default_attribute("landmassmap", DefaultLandMassLayer, {})
| 2.625
| 3
|
arsenyinfo/src/fit.py
|
cortwave/camera-model-identification
| 6
|
12782264
|
from functools import partial
from keras.optimizers import SGD
from fire import Fire
from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection
from src.model import get_model, get_callbacks
from src.aug import augment
from src.utils import logger
def fit_once(model, model_name, loss, train, val, stage, n_fold, start_epoch, initial=False):
logger.info(f'Stage {stage} started: loss {loss}, fold {n_fold}')
steps_per_epoch = 500
validation_steps = 100
model.compile(optimizer=SGD(lr=0.01 if initial else 0.001, clipvalue=4, momentum=.9, nesterov=True),
loss=loss,
metrics=['accuracy'])
history = model.fit_generator(train,
epochs=500,
steps_per_epoch=steps_per_epoch,
validation_data=val,
workers=8,
max_queue_size=32,
use_multiprocessing=False,
validation_steps=validation_steps,
callbacks=get_callbacks(model_name, loss, stage, n_fold),
initial_epoch=start_epoch,
)
return model, max(history.epoch)
def fit_model(model_name, batch_size=16, n_fold=1, shape=384):
n_classes = 10
aug = partial(augment, expected_shape=shape)
n_fold = int(n_fold)
batch_size = int(batch_size)
model, preprocess = get_model(model_name, shape, n_classes=n_classes)
def make_config(**kwargs):
d = {'n_fold': int(n_fold),
'transform': preprocess,
'batch_size': batch_size,
'train': True,
'size': shape,
'aug': aug,
'center_crop_size': 0}
d.update(kwargs)
return d
kaggle_train = KaggleDataset(**make_config())
kaggle_val = KaggleDataset(**make_config(train=False))
pseudo_train = PseudoDataset(**make_config())
pseudo_val = PseudoDataset(**make_config(train=False))
extra_train = ExtraDataset(**make_config())
extra_val = ExtraDataset(**make_config(train=False))
frozen_epochs = 1
steps_per_epoch = 500
validation_steps = 50
loss = 'categorical_crossentropy'
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
model.fit_generator(DataCollection(kaggle_train, extra_train, pseudo_train),
epochs=frozen_epochs,
steps_per_epoch=steps_per_epoch,
validation_data=DataCollection(kaggle_val, extra_val, pseudo_val),
workers=8,
validation_steps=validation_steps,
use_multiprocessing=False,
max_queue_size=50,
)
for layer in model.layers:
layer.trainable = True
epoch = frozen_epochs
for stage, (train, val) in enumerate(((DataCollection(kaggle_train, extra_train, pseudo_train),
DataCollection(kaggle_val, extra_val, pseudo_val)),
(DataCollection(kaggle_train, pseudo_train),
DataCollection(kaggle_val, pseudo_val)),
(DataCollection(pseudo_train), DataCollection(pseudo_val)),
)):
model, epoch = fit_once(model=model,
model_name=model_name,
loss='categorical_crossentropy',
train=train,
val=val,
start_epoch=epoch,
stage=stage,
n_fold=n_fold,
initial=True if stage > 0 else False
)
if __name__ == '__main__':
Fire(fit_model)
| 2.328125
| 2
|
Products/GSSearch/tests/test_searchmessage.py
|
groupserver/Products.GSSearch
| 0
|
12782265
|
##############################################################################
#
# Copyright (c) 2004, 2005 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Size adapters for testing
$Id: test_size.py 61072 2005-10-31 17:43:51Z philikon $
"""
import os, sys
if __name__ == '__main__':
execfile(os.path.join(sys.path[0], 'framework.py'))
from zope.interface import implements
from zope.app.size.interfaces import ISized
def test_emailmessage():
"""
Test searching
Set up:
>>> from zope.app.testing.placelesssetup import setUp, tearDown
>>> setUp()
>>> import Products.Five
>>> import Products.XWFMailingListManager
>>> from Products.GSSearch import queries
>>> from Products.Five import zcml
>>> from Products.ZSQLAlchemy.ZSQLAlchemy import manage_addZSQLAlchemy
>>> zcml.load_config('meta.zcml', Products.Five)
>>> zcml.load_config('permissions.zcml', Products.Five)
>>> zcml.load_config('configure.zcml', Products.XWFMailingListManager)
>>> alchemy_adaptor = manage_addZSQLAlchemy(app, 'zalchemy')
>>> alchemy_adaptor.manage_changeProperties( hostname='localhost',
... port=5433,
... username='onlinegroups',
... password='',
... dbtype='postgres',
... database='onlinegroups.net')
>>> mq = queries.MessageQuery( {}, alchemy_adaptor )
>>> from zope.component import createObject
Clean up:
>>> tearDown()
"""
def test_suite():
from Testing.ZopeTestCase import ZopeDocTestSuite
return ZopeDocTestSuite()
if __name__ == '__main__':
framework()
| 1.765625
| 2
|
tools/perf/metrics/timeline_interaction_record_unittest.py
|
anirudhSK/chromium
| 0
|
12782266
|
<filename>tools/perf/metrics/timeline_interaction_record_unittest.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from metrics import timeline_interaction_record
from telemetry.core.timeline import async_slice
class ParseTests(unittest.TestCase):
def testParse(self):
self.assertTrue(timeline_interaction_record.IsTimelineInteractionRecord(
'Interaction.Foo'))
self.assertTrue(timeline_interaction_record.IsTimelineInteractionRecord(
'Interaction.Foo/Bar'))
self.assertFalse(timeline_interaction_record.IsTimelineInteractionRecord(
'SomethingRandom'))
def CreateRecord(self, event_name):
s = async_slice.AsyncSlice(
'cat', event_name,
timestamp=1, duration=2)
return timeline_interaction_record.TimelineInteractionRecord(s)
def testCreate(self):
r = self.CreateRecord('Interaction.LogicalName')
self.assertEquals('LogicalName', r.logical_name)
self.assertEquals(False, r.is_smooth)
self.assertEquals(False, r.is_loading_resources)
r = self.CreateRecord('Interaction.LogicalName/is_smooth')
self.assertEquals('LogicalName', r.logical_name)
self.assertEquals(True, r.is_smooth)
self.assertEquals(False, r.is_loading_resources)
r = self.CreateRecord('Interaction.LogicalNameWith/Slash/is_smooth')
self.assertEquals('LogicalNameWith/Slash', r.logical_name)
self.assertEquals(True, r.is_smooth)
self.assertEquals(False, r.is_loading_resources)
r = self.CreateRecord(
'Interaction.LogicalNameWith/Slash/is_smooth,is_loading_resources')
self.assertEquals('LogicalNameWith/Slash', r.logical_name)
self.assertEquals(True, r.is_smooth)
self.assertEquals(True, r.is_loading_resources)
| 2.375
| 2
|
TwoTimeScaleHybridLearning/src/common/utils.py
|
sidsrini12/FURL_Sim
| 0
|
12782267
|
<reponame>sidsrini12/FURL_Sim
import common.config as cfg
from math import factorial as f
from models.cnn import CNN
from models.fcn import FCN
from models.svm import SVM
import networkx as nx
import numpy as np
import os
import pickle as pkl
from random import random
import sys
import torch
from torch.utils.data import TensorDataset, DataLoader
from torchvision import datasets, transforms
def booltype(arg):
return bool(int(arg))
def decimal_format(num, places=4):
return round(num, places)
def eut_add(eut_range):
return eut_range[0] \
if len(eut_range) == 1 \
else np.random.randint(
eut_range[0], eut_range[-1])
def flip(p):
return True if random() < p else False
def get_average_degree(graph):
return sum(dict(graph.degree()).values())/len(graph)
def get_dataloader(data, targets, batchsize, shuffle=True):
dataset = TensorDataset(data, targets)
return DataLoader(dataset, batch_size=batchsize,
shuffle=shuffle, num_workers=1)
def get_device(args):
USE_CUDA = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
return torch.device("cuda:2" if USE_CUDA else "cpu")
def get_laplacian(graph):
return nx.laplacian_matrix(graph).toarray()
def get_max_degree(graph):
return max(dict(graph.degree()).values())
def get_model(args):
if args.clf == 'cnn':
print('Initializing CNN...')
model_class = CNN
if args.clf == 'fcn':
print('Initializing FCN...')
model_class = FCN
elif args.clf == 'svm':
print('Initializing SVM...')
model_class = SVM
device = get_device(args)
model = model_class(args.input_size, args.output_size).to(device)
paths = get_paths(args)
model.load_state_dict(torch.load(paths.init_path))
print('Load init: {}'.format(paths.init_path))
loss_type = 'hinge' if args.clf == 'svm' else 'nll'
agg_type = 'laplacian' if args.paradigm == 'hl' else 'averaging'
print("Loss: {}\nAggregation: {}".format(loss_type, agg_type))
return model, loss_type, agg_type
def get_data_path(ckpt_path, args):
return '{}/{}_{}/data/n_classes_per_node_{}_stratify_{}' \
'_uniform_{}_repeat_{}.pkl'.format(
ckpt_path, args.dataset, args.num_workers, args.non_iid,
args.stratify, args.uniform_data, args.repeat)
def get_eut_schedule(args):
if not args.eut_range:
return list(range(1, args.epochs+1))
if args.tau_max:
return [min(args.eut_range), args.epochs]
eut_schedule = [0]
np.random.seed(args.eut_seed)
add = eut_add(args.eut_range)
while eut_schedule[-1] + add < args.epochs:
eut_schedule.append(eut_schedule[-1] + add)
add = eut_add(args.eut_range)
return eut_schedule[1:] + [args.epochs]
def get_lut_schedule(args):
if not args.lut_intv:
return []
lut_schedule = [0]
while lut_schedule[-1] + args.lut_intv < args.epochs:
lut_schedule.append(lut_schedule[-1] + args.lut_intv)
return lut_schedule[1:]
def get_paths(args):
ckpt_path = cfg.ckpt_path
folder = '{}_{}'.format(args.dataset, args.num_workers)
if args.dry_run:
model_name = 'debug'
else:
model_name = 'clf_{}_paradigm_{}_uniform_{}_non_iid_{}' \
'_num_workers_{}_lr_{}_decay_{}_batch_{}'.format(
args.clf, args.paradigm, args.uniform_data, args.non_iid,
args.num_workers, args.lr, args.decay,
args.batch_size)
if args.paradigm in ['fp', 'hl']:
if args.lut_intv:
model_name += '_eut_{}_lut_{}_rounds_{}'.format(
args.eut_range[0], args.lut_intv, args.rounds)
else:
model_name += '_delta_{}_zeta_{}_beta_{}_mu_{}_phi_{}_factor_{}'.format(
args.delta, args.zeta, args.beta, args.mu,
args.phi, args.factor)
if args.tau_max:
model_name += '_T1_{}_Tmax_{}_E_{}_D_{}'.format(
min(args.eut_range), args.tau_max, args.e_frac, args.d_frac)
elif args.eut_range and not args.lut_intv:
model_name += '_eut_range_{}'.format('_'.join(map(str, args.eut_range)))
if args.cs:
model_name += '_cs_{}'.format('_'.join(map(str, args.cs)))
if args.channel == 1:
model_name += '_csi'
elif args.channel == 2:
model_name += '_nocsi'
paths = {}
paths['model_name'] = model_name
paths['log_file'] = '{}/{}/logs/{}.log'.format(
ckpt_path, folder, model_name)
paths['init_path'] = '{}/{}/{}_{}.init'.format(
ckpt_path, 'init', args.dataset, args.clf)
paths['best_path'] = os.path.join(
ckpt_path, folder, 'models', model_name + '.best')
paths['stop_path'] = os.path.join(
ckpt_path, folder, 'models', model_name + '.stop')
paths['data_path'] = get_data_path(ckpt_path, args)
paths['plot_path'] = '{}/{}/plots/{}.jpg'.format(
ckpt_path, folder, model_name)
paths['hist_path'] = '{}/{}/history/{}.pkl'.format(
ckpt_path, folder, model_name)
paths['aux_path'] = '{}/{}/history/{}_aux.pkl'.format(
ckpt_path, folder, model_name)
return Struct(**paths)
def get_rho(graph, num_nodes, factor):
max_d = get_max_degree(graph)
d = 1/(factor*max_d)
L = get_laplacian(graph)
V = np.eye(num_nodes) - d*L
Z = V-(1/num_nodes)
return get_spectral_radius(Z)
def get_spectral_radius(matrix):
eig, _ = np.linalg.eig(matrix)
return max(eig)
def get_testloader(dataset, batch_size, shuffle=True):
kwargs = {}
if dataset == 'mnist':
return torch.utils.data.DataLoader(
datasets.MNIST(cfg.data_path, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=shuffle, **kwargs)
elif dataset == 'cifar':
return torch.utils.data.DataLoader(
datasets.CIFAR10(cfg.data_path, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))])),
batch_size=batch_size, shuffle=shuffle, **kwargs)
elif dataset == 'fmnist':
return torch.utils.data.DataLoader(
datasets.FashionMNIST(cfg.data_path, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.2861,),
(0.3530,))])),
batch_size=batch_size, shuffle=shuffle, **kwargs)
def get_trainloader(dataset, batch_size, shuffle=True):
kwargs = {}
if dataset == 'mnist':
return torch.utils.data.DataLoader(
datasets.MNIST(cfg.data_path, train=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=shuffle, **kwargs)
elif dataset == 'cifar':
return torch.utils.data.DataLoader(
datasets.CIFAR10(cfg.data_path, train=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))])),
batch_size=batch_size, shuffle=shuffle, **kwargs)
elif dataset == 'fmnist':
return torch.utils.data.DataLoader(
datasets.FashionMNIST(cfg.data_path, train=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.2861,),
(0.3530,))])),
batch_size=batch_size, shuffle=shuffle, **kwargs)
def history_parser(dataset, num_nodes, history):
h = pkl.load(
open('../ckpts/{}_{}/history/{}'.format(
dataset, num_nodes, history), 'rb'))
if len(h) == 8:
x_ax, y_ax, l_test, rounds, eps, eta_phi, beta, mu = h
else:
x_ax, y_ax, l_test, rounds, eps, eta_phi = h
return x_ax, y_ax, l_test
def in_range(elem, upper, lower):
return (elem >= lower) and (elem <= upper)
def init_logger(log_file, dry_run=False):
print("Logging: ", log_file)
std_out = sys.stdout
if not dry_run:
log_file = open(log_file, 'w')
sys.stdout = log_file
return log_file, std_out
def nCr(n, r):
return f(n)//f(r)//f(n-r)
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
| 2.109375
| 2
|
QuantitativeEditing/parameter_screen.py
|
wjs018/QuantitativeEditing
| 21
|
12782268
|
<reponame>wjs018/QuantitativeEditing
import gc
import scenedetect as sd
from moviepy.editor import *
if __name__ == '__main__':
# Specify video location here
video_file = '/media/unraid/Datasets/QuantitativeEditing/To Analyze/Bad Lip Reading_2018_Sample of My Pasta.mkv'
outfile_dir = '/media/unraid/Datasets/QuantitativeEditing/Parameter Screen/'
outfile_prefix = 'Bad Lip Reading_2018_Sample of My Pasta_'
# First, load into a video manager
video_mgr = sd.VideoManager([video_file])
stats_mgr = sd.stats_manager.StatsManager()
scene_mgr = sd.SceneManager(stats_mgr)
# Specify range to vary for threshold value
for threshold in range(22, 41):
# Try a couple different minimum scene lengths for each threshold
for min_scene_len in [5, 10, 15]:
# Add a content detector
scene_mgr.add_detector(
sd.ContentDetector(threshold=threshold,
min_scene_len=min_scene_len))
# Get the starting timecode
base_timecode = video_mgr.get_base_timecode()
# Start the video manager
video_mgr.set_downscale_factor(1)
video_mgr.start()
# Detect the scenes
scene_mgr.detect_scenes(frame_source=video_mgr)
# Retrieve scene list
scene_mgr_list = scene_mgr.get_scene_list(base_timecode)
# Initialize scene list for analysis
scene_list = []
# Build our list from the frame_timecode objects
for scene in scene_mgr_list:
start_frame, end_frame = scene
start_frame = start_frame.frame_num
scene_list.append(start_frame)
# Extract some info
video_fps = end_frame.framerate
frames_read = end_frame.frame_num
frames_processed = frames_read
# Convert detected scenes to time
scene_list_msec = [(1000.0 * x) / float(video_fps)
for x in scene_list]
# Reset the detector for next iteration
video_mgr.release()
video_mgr.reset()
scene_mgr.clear_detectors()
scene_mgr.clear()
# Pull music video file into moviepy
mv_clip = VideoFileClip(video_file)
W, H = mv_clip.size
# Initialize some variables
scene = 0
previous_scene_msec = 0
textclip_list = []
# Loop over list of scenes, creating TextClips for each scene
for scene_idx in range(len(scene_list_msec) + 1):
# Each iteration is the same except for the final scene which is
# handled separately in the else statement
if scene_idx != len(scene_list_msec):
# Calculate duration of the scene in seconds
duration = (scene_list_msec[scene_idx] -
previous_scene_msec) / 1000
# Record ending time of scene for the next loop
previous_scene_msec = scene_list_msec[scene_idx]
# Make the video clips of the numbers
txtclip = (TextClip("%03d" % scene_idx, fontsize=288,
color='white', font='FreeMono-Bold',
stroke_color='black', stroke_width=5).
set_pos('center').
set_duration(duration).set_opacity(0.6))
# Add the clip to a list of all the TextClips
textclip_list.append(txtclip)
# Last scene needs special treatment
else:
# Calculate the total duration of the video
total_duration_msec = frames_read / float(video_fps) * 1000
# Calculate the duration of the final scene
clip_duration = (total_duration_msec -
previous_scene_msec) / 1000
# Create the TextClip for the final scene
txtclip = (TextClip("%03d" % scene_idx, fontsize=288,
color='white', font='FreeMono-Bold',
stroke_color='black', stroke_width=5).
set_pos('center').
set_duration(clip_duration).
set_opacity(0.6))
# Add it to the list of other TextClips
textclip_list.append(txtclip)
# Play the TextClips one after the other
final_textclip = concatenate_videoclips(textclip_list).set_pos('center')
# Play the TextClips over the original video
final_video = CompositeVideoClip([mv_clip, final_textclip],
size=(W, H))
# Save resulting video to file, formatting name to avoid overwrites
outfile_name = outfile_prefix + (str(threshold) + '_' +
str(min_scene_len) + '.mp4')
outfile = os.path.join(outfile_dir, outfile_name)
final_video.write_videofile(outfile,
fps=video_fps,
preset='ultrafast')
# Having some memory overflow problems on my laptop, deleting some
# variables and forcing garbage collection fixes that
del txtclip
del textclip_list
del final_textclip
del final_video
gc.collect()
| 2.671875
| 3
|
pavement.py
|
acolinisi/h5py
| 1
|
12782269
|
from paver.easy import *
import os
DLLS = ['h5py_hdf5.dll', 'h5py_hdf5_hl.dll', 'szip.dll', 'zlib.dll']
@task
def release_unix():
sh('python setup.py clean')
sh('python setup.py configure --reset --hdf5-version=1.8.4')
sh('python setup.py build -f')
sh('python setup.py test')
sh('python setup.py sdist')
print("Unix release done. Distribution tar file is in dist/")
@task
def release_windows():
for pyver in (27, 34):
exe = r'C:\Python%d\Python.exe' % pyver
hdf5 = r'c:\hdf5\Python%d' % pyver
sh('%s setup.py clean' % exe)
sh('%s setup.py configure --reset --hdf5-version=1.8.13 --hdf5=%s' % (exe, hdf5))
for dll in DLLS:
sh('copy c:\\hdf5\\Python%d\\bin\\%s h5py /Y' % (pyver, dll))
sh('%s setup.py build -f' % exe)
sh('%s setup.py test' % exe)
sh('%s setup.py bdist_wininst' % exe)
print ("Windows exe release done. Distribution files are in dist/")
for dll in DLLS:
os.unlink('h5py\\%s' % dll)
@task
@consume_args
def git_summary(options):
sh('git log --no-merges --pretty=oneline --abbrev-commit %s..HEAD'%options.args[0])
sh('git shortlog -s -n %s..HEAD'%options.args[0])
| 2.03125
| 2
|
TE_model.py
|
AllenInstitute/coupledAE
| 5
|
12782270
|
<reponame>AllenInstitute/coupledAE
# -----------------------------------------------
# 5341 exclusive, 3585 matched, total 8926 in T
# -----------------------------------------------
# 0 exclusive, 3585 matched, total 3585 in E
import argparse
import os
import pdb
import re
import socket
import sys
import timeit
import numpy as np
import scipy.io as sio
import tensorflow as tf
from tensorflow.python.keras.callbacks import Callback, ModelCheckpoint, CSVLogger
from tensorflow.python.keras.layers import BatchNormalization, Dense, Dropout, Input, Lambda
from tensorflow.python.keras.losses import mean_squared_error as mse
from tensorflow.python.keras.models import Model
from datagen import DatagenTE, dataset_50fold
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=100, type=int, help="Coupling strength")
parser.add_argument("--n_paired_per_batch",default=100, type=int, help="Number of paired examples")
parser.add_argument("--cvset" ,default=0, type=int, help="50-fold cross validation set number")
parser.add_argument("--p_dropT", default=0.5, type=float, help="Dropout rate T arm")
parser.add_argument("--p_dropE", default=0.1, type=float, help="Dropout rate E arm")
parser.add_argument("--stdE", default=0.05, type=float, help="Gaussian noise sigma E arm")
parser.add_argument("--fc_dimT", default=[50,50,50,50], type=int, help="List of dims for T fc layers", nargs = '+')
parser.add_argument("--fc_dimE", default=[60,60,60,60], type=int, help="List of dims for E fc layers", nargs = '+')
parser.add_argument("--latent_dim", default=3, type=int, help="Number of latent dims")
parser.add_argument("--recon_strT", default=1.0, type=float, help="Reconstruction strength T arm")
parser.add_argument("--recon_strE", default=0.1, type=float, help="Reconstruction strength E arm")
parser.add_argument("--cpl_str", default=10.0, type=float, help="Coupling strength")
parser.add_argument("--n_epoch", default=2000, type=int, help="Number of epochs to train")
parser.add_argument("--steps_per_epoch", default=500, type=int, help="Number of gradient steps per epoch")
parser.add_argument("--run_iter", default=0, type=int, help="Run-specific id")
parser.add_argument("--model_id", default='crossval', type=str, help="Model-specific id")
parser.add_argument("--exp_name", default='patchseq_v2_noadapt',type=str, help="Experiment set")
def main(batch_size=100, n_paired_per_batch=100, cvset=0,
p_dropT=0.5, p_dropE=0.1, stdE=0.05,
fc_dimT=[50,50,50,50],fc_dimE=[60,60,60,60],latent_dim=3,
recon_strT=1.0, recon_strE=0.1, cpl_str=10.0,
n_epoch=2000, steps_per_epoch = 500,
run_iter=0, model_id='crossval_noadaptloss',exp_name='patchseq_v2_noadapt'):
train_dat, val_dat, train_ind_T, train_ind_E, val_ind, dir_pth = dataset_50fold(exp_name=exp_name,cvset=cvset)
train_generator = DatagenTE(dataset=train_dat, batch_size=batch_size, n_paired_per_batch=n_paired_per_batch, steps_per_epoch=steps_per_epoch)
chkpt_save_period = 1e7
#Architecture parameters ------------------------------
input_dim = [train_dat['T'].shape[1],train_dat['E'].shape[1]]
#'_fcT_' + '-'.join(map(str, fc_dimT)) + \
#'_fcE_' + '-'.join(map(str, fc_dimE)) + \
fileid = model_id + \
'_rT_' + str(recon_strT) + \
'_rE_' + str(recon_strE) + \
'_cs_' + str(cpl_str) + \
'_pdT_' + str(p_dropT) + \
'_pdE_' + str(p_dropE) + \
'_sdE_' + str(stdE) + \
'_bs_' + str(batch_size) + \
'_np_' + str(n_paired_per_batch) + \
'_se_' + str(steps_per_epoch) +\
'_ne_' + str(n_epoch) + \
'_cv_' + str(cvset) + \
'_ri_' + str(run_iter)
fileid = fileid.replace('.', '-')
print(fileid)
out_actfcn = ['elu','linear']
def add_gauss_noise(x):
'''Injects additive gaussian noise independently into each element of input x'''
x_noisy = x + tf.random.normal(shape=tf.shape(x), mean=0., stddev=stdE, dtype = tf.float32)
return tf.keras.backend.in_train_phase(x_noisy, x)
#Model inputs -----------------------------------------
M = {}
M['in_ae_0'] = Input(shape=(input_dim[0],), name='in_ae_0')
M['in_ae_1'] = Input(shape=(input_dim[1],), name='in_ae_1')
M['ispaired_ae_0'] = Input(shape=(1,), name='ispaired_ae_0')
M['ispaired_ae_1'] = Input(shape=(1,), name='ispaired_ae_1')
#Transcriptomics arm---------------------------------------------------------------------------------
M['dr_ae_0'] = Dropout(p_dropT, name='dr_ae_0')(M['in_ae_0'])
X = 'dr_ae_0'
for j, units in enumerate(fc_dimT):
Y = 'fc'+ format(j,'02d') +'_ae_0'
M[Y] = Dense(units, activation='elu', name=Y)(M[X])
X = Y
M['ldx_ae_0'] = Dense(latent_dim, activation='linear',name='ldx_ae_0')(M[X])
M['ld_ae_0'] = BatchNormalization(scale = False, center = False ,epsilon = 1e-10, momentum = 0.99, name='ld_ae_0')(M['ldx_ae_0'])
X = 'ld_ae_0'
for j, units in enumerate(reversed(fc_dimT)):
Y = 'fc'+ format(j+len(fc_dimT),'02d') +'_ae_0'
M[Y] = Dense(units, activation='elu', name=Y)(M[X])
X = Y
M['ou_ae_0'] = Dense(input_dim[0], activation=out_actfcn[0], name='ou_ae_0')(M[X])
#Electrophysiology arm--------------------------------------------------------------------------------
M['no_ae_1'] = Lambda(add_gauss_noise,name='no_ae_1')(M['in_ae_1'])
M['dr_ae_1'] = Dropout(p_dropE, name='dr_ae_1')(M['no_ae_1'])
X = 'dr_ae_1'
for j, units in enumerate(fc_dimE):
Y = 'fc'+ format(j,'02d') +'_ae_1'
M[Y] = Dense(units, activation='elu', name=Y)(M[X])
X = Y
M['ldx_ae_1'] = Dense(latent_dim, activation='linear',name='ldx_ae_1')(M[X])
M['ld_ae_1'] = BatchNormalization(scale = False, center = False ,epsilon = 1e-10, momentum = 0.99, name='ld_ae_1')(M['ldx_ae_1'])
X = 'ld_ae_1'
for j, units in enumerate(reversed(fc_dimE)):
Y = 'fc'+ format(j+len(fc_dimE),'02d') +'_ae_1'
M[Y] = Dense(units, activation='elu', name=Y)(M[X])
X = Y
M['ou_ae_1'] = Dense(input_dim[1], activation=out_actfcn[1], name='ou_ae_1')(M[X])
cplAE = Model(inputs=[M['in_ae_0'], M['in_ae_1'], M['ispaired_ae_0'], M['ispaired_ae_1']],
outputs=[M['ou_ae_0'], M['ou_ae_1'],M['ld_ae_0'], M['ld_ae_1']])
def coupling_loss(zi, pairedi, zj, pairedj):
'''Minimum singular value based loss.
\n SVD is calculated over all datapoints
\n MSE is calculated over only `paired` datapoints'''
batch_size = tf.shape(zi)[0]
paired_i = tf.reshape(pairedi, [tf.shape(pairedi)[0],])
paired_j = tf.reshape(pairedj, [tf.shape(pairedj)[0],])
zi_paired = tf.boolean_mask(zi, tf.equal(paired_i, 1.0))
zj_paired = tf.boolean_mask(zj, tf.equal(paired_j, 1.0))
vars_j_ = tf.square(tf.linalg.svd(zj - tf.reduce_mean(zj, axis=0), compute_uv=False))/tf.cast(batch_size - 1, tf.float32)
vars_j = tf.where(tf.math.is_nan(vars_j_), tf.zeros_like(vars_j_) + tf.cast(1e-1,dtype=tf.float32), vars_j_)
L_ij = tf.compat.v1.losses.mean_squared_error(zi_paired, zj_paired)/tf.maximum(tf.reduce_min(vars_j, axis=None),tf.cast(1e-2,dtype=tf.float32))
def loss(y_true, y_pred):
#Adaptive version:#tf.multiply(tf.stop_gradient(L_ij), L_ij)
return L_ij
return loss
#Create loss dictionary
loss_dict = {'ou_ae_0': mse, 'ou_ae_1': mse,
'ld_ae_0': coupling_loss(zi=M['ld_ae_0'], pairedi=M['ispaired_ae_0'],zj=M['ld_ae_1'], pairedj=M['ispaired_ae_1']),
'ld_ae_1': coupling_loss(zi=M['ld_ae_1'], pairedi=M['ispaired_ae_1'],zj=M['ld_ae_0'], pairedj=M['ispaired_ae_0'])}
#Loss weights dictionary
loss_wt_dict = {'ou_ae_0': recon_strT,
'ou_ae_1': recon_strE,
'ld_ae_0': cpl_str,
'ld_ae_1': cpl_str}
#Add loss definitions to the model
cplAE.compile(optimizer='adam', loss=loss_dict, loss_weights=loss_wt_dict)
#Checkpoint function definitions
checkpoint_cb = ModelCheckpoint(filepath=(dir_pth['checkpoint']+fileid + '-checkpoint-' + '{epoch:04d}' + '.h5'),
verbose=1, save_best_only=False, save_weights_only=True,
mode='auto', period=chkpt_save_period)
val_in = {'in_ae_0': val_dat['T'],
'in_ae_1': val_dat['E'],
'ispaired_ae_0': val_dat['T_ispaired'],
'ispaired_ae_1': val_dat['E_ispaired']}
val_out = {'ou_ae_0': val_dat['T'],
'ou_ae_1': val_dat['E'],
'ld_ae_0': np.zeros((val_dat['T'].shape[0], latent_dim)),
'ld_ae_1': np.zeros((val_dat['E'].shape[0], latent_dim))}
#Custom callback object
log_cb = CSVLogger(filename=dir_pth['logs']+fileid+'.csv')
last_checkpoint_epoch = 0
start_time = timeit.default_timer()
cplAE.fit_generator(train_generator,
validation_data=(val_in,val_out),
epochs=n_epoch,
max_queue_size=100,
use_multiprocessing=False, workers=1,
initial_epoch=last_checkpoint_epoch,
verbose=2, callbacks=[checkpoint_cb,log_cb])
elapsed = timeit.default_timer() - start_time
print('-------------------------------')
print('Training time:',elapsed)
print('-------------------------------')
#Saving weights
cplAE.save_weights(dir_pth['result']+fileid+'-modelweights'+'.h5')
matsummary = {}
matsummary['cvset'] = cvset
matsummary['val_ind'] = val_ind
matsummary['train_ind_T'] = train_ind_T
matsummary['train_ind_E'] = train_ind_E
#Trained model predictions
i = 0
encoder = Model(inputs=M['in_ae_'+str(i)], outputs=M['ld_ae_'+str(i)])
matsummary['z_val_'+str(i)] = encoder.predict({'in_ae_'+str(i): val_dat['T']})
matsummary['z_train_'+str(i)] = encoder.predict({'in_ae_'+str(i): train_dat['T']})
i = 1
encoder = Model(inputs=M['in_ae_'+str(i)], outputs=M['ld_ae_'+str(i)])
matsummary['z_val_'+str(i)] = encoder.predict({'in_ae_'+str(i): val_dat['E']})
matsummary['z_train_'+str(i)] = encoder.predict({'in_ae_'+str(i): train_dat['E']})
sio.savemat(dir_pth['result']+fileid+'-summary', matsummary)
return
if __name__ == "__main__":
args = parser.parse_args()
main(**vars(args))
| 1.820313
| 2
|
Programmers/src/12934/solution.py
|
lstar2397/algorithms
| 0
|
12782271
|
<reponame>lstar2397/algorithms
def solution(n):
x = n ** 0.5
if x == int(x):
return (x + 1) ** 2
else:
return -1
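# Illustrative calls (not part of the original submission): 121 == 11 ** 2, so
# solution(121) returns (11.0 + 1) ** 2 == 144.0, while solution(3) returns -1
# because 3 is not a perfect square.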
| 3.28125
| 3
|
utils/utils.py
|
luowensheng/MCN
| 130
|
12782272
|
"""Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import spacy
import re
import cv2
import time
from keras_bert.tokenizer import Tokenizer
from keras_bert.loader import load_trained_model_from_checkpoint, load_vocabulary
from keras_bert import extract_embeddings
import os
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def get_bert_input(text,vocabs,max_len=512):
tokenizer = Tokenizer(vocabs, cased=False)
    # encode() returns token ids and segment ids truncated/padded to max_len
    token, segment = tokenizer.encode(text, max_len=max_len)
    # pad defensively in case fewer than max_len ids were returned
    token.extend([0] * (max_len - len(token)))
    segment.extend([0] * (max_len - len(segment)))
return [token,segment]
def seq_to_list(s):
'''
note: 2018.10.3
    used to preprocess sentences
'''
t_str = s.lower()
for i in [r'\?', r'\!', r'\'', r'\"', r'\$', r'\:', r'\@', r'\(', r'\)', r'\,', r'\.', r'\;', r'\n']:
t_str = re.sub(i, '', t_str)
for i in [r'\-', r'\/']:
t_str = re.sub(i, ' ', t_str)
q_list = re.sub(r'\?', '', t_str.lower()).split(' ')
q_list = list(filter(lambda x: len(x) > 0, q_list))
return q_list
def qlist_to_vec(max_length, q_list,embed):
'''
note: 2018.10.3
    used to preprocess sentences
'''
glove_matrix = []
glove_dict = {}
q_len = len(q_list)
if q_len > max_length:
q_len = max_length
for i in range(max_length):
if i < q_len:
w=q_list[i]
if w not in glove_dict:
glove_dict[w]=embed(u'%s'%w).vector
glove_matrix.append(glove_dict[w])
else:
glove_matrix.append(np.zeros(300,dtype=float))
return np.array(glove_matrix)
def get_random_data(annotation_line, input_shape,embed,config, train_mode=True, max_boxes=1):
'''random preprocessing for real-time data augmentation'''
SEG_DIR=config['seg_gt_path']
line = annotation_line.split()
h, w = input_shape
stop=len(line)
for i in range(1,len(line)):
if (line[i]=='~'):
stop=i
break
# print(line[1:stop])
box_ = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:stop]])
box=np.zeros([1,5])
seg_id=box_[0][-1]
box[0]=box_[0][:-1]
seg_map=np.load(os.path.join(SEG_DIR,str(seg_id)+'.npy'))
seg_map_ori=np.array(seg_map).astype(np.float32)
seg_map=Image.fromarray(seg_map_ori)
# print(np.shape(box))
# print(box)
#####################################
    # sentence processing: max length set to 20; randomly choose one sentence for training
sentences=[]
sent_stop=stop+1
for i in range(stop+1,len(line)):
if line[i]=='~':
sentences.append(line[sent_stop:i])
sent_stop=i+1
sentences.append(line[sent_stop:len(line)])
choose_index=np.random.choice(len(sentences))
sentence=sentences[choose_index]
# print(qlist)
if config['use_bert']:
vocabs = load_vocabulary(config['bert_path']+'/vocab.txt')
word_vec=get_bert_input(sentence,vocabs,512)
else:
word_vec=qlist_to_vec(config['word_len'], sentence,embed)
# print(word_vec)
# print(np.shape(word_vec))
#######################################
image = Image.open(os.path.join(config['image_path'],line[0]))
iw, ih = image.size
scale = min(w / iw, h / ih)
nw = int(iw * scale)
nh = int(ih * scale)
dx = (w - nw) // 2
dy = (h - nh) // 2
ori_image = image
image = image.resize((nw, nh), Image.BICUBIC)
new_image = Image.new('RGB', (w, h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image) / 255.
seg_map = seg_map.resize((nw, nh))
new_map = Image.new('L', (w, h), (0))
new_map.paste(seg_map, (dx, dy))
seg_map_data = np.array(new_map)
seg_map_data = cv2.resize(seg_map_data, (
seg_map_data.shape[0] // config['seg_out_stride'], seg_map_data.shape[0] // config['seg_out_stride']),interpolation=cv2.INTER_NEAREST)
seg_map_data = np.reshape(seg_map_data, [np.shape(seg_map_data)[0], np.shape(seg_map_data)[1], 1])
# print(new_image.size)
# correct boxes
box_data = np.zeros((max_boxes, 5))
if len(box) > 0:
if len(box) > max_boxes: box = box[:max_boxes]
box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
box_data[:len(box)] = box
    box_data = box_data[:, 0:4]  # drop the class column
if not train_mode:
word_vec=[qlist_to_vec(config['word_len'], sent,embed) for sent in sentences]
return image_data, box_data,word_vec,ori_image,sentences,np.expand_dims(seg_map_ori ,-1)
return image_data, box_data,word_vec,seg_map_data
def lr_step_decay(lr_start=0.001, steps=[30, 40]):
def get_lr(epoch):
decay_rate = len(steps)
for i, e in enumerate(steps):
if epoch < e:
decay_rate = i
break
lr = lr_start / (10 ** (decay_rate))
return lr
return get_lr
#powre decay
def lr_power_decay(lr_start=2.5e-4,lr_power=0.9, warm_up_lr=0.,step_all=45*1414,warm_up_step=1000):
# step_per_epoch=3286
def warm_up(base_lr, lr, cur_step, end_step):
return base_lr + (lr - base_lr) * cur_step / end_step
def get_learningrate(epoch):
if epoch<warm_up_step:
lr = warm_up(warm_up_lr, lr_start, epoch, warm_up_step)
else:
lr = lr_start * ((1 - float(epoch-warm_up_step) / (step_all-warm_up_step)) ** lr_power)
return lr
# print("learning rate is", lr)
return get_learningrate
| 2.734375
| 3
|
plotting/players_by_server.py
|
thundersen/ohol-data
| 0
|
12782273
|
#!env/bin/python3
import datetime
import sched
import pandas as pd
import plotly.graph_objs as go
import plotly.plotly as py
import requests
CSV_FILE = 'OholPlayersByServer.csv'
def process_current_player_counts():
data = fetch()
write(data, CSV_FILE)
draw(CSV_FILE)
def fetch():
timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
response = requests.get('http://onehouronelife.com/reflector/server.php?action=report')
response.raise_for_status()
raw = response.content
player_counts = [parse_player_count(line) for line in parse_server_lines(raw)]
return [timestamp] + player_counts
def parse_server_lines(raw):
return [line for line in str(raw).split('<br><br>') if line.startswith('|--> server')]
def parse_player_count(server_line):
return '' if server_line.endswith('OFFLINE') else server_line.split()[-3]
def write(data, filename):
data_line = ';'.join(data)
with open(filename, "a") as file:
file.write(data_line + '\n')
print(data_line)
def periodic(scheduler, interval, action):
scheduler.enter(interval, 1, periodic, (scheduler, interval, action))
action()
def draw(filename):
fig = dict(
data=arrange_plot_data(filename),
layout=dict(
title='OHOL Players by Server',
xaxis=dict(
rangeslider=dict(visible=True),
type='date'
)
))
upload_plot(fig)
def upload_plot(figure):
try:
py.plot(figure, filename=figure['layout']['title'], auto_open=False)
except Exception as e:
print('ERROR creating plot:\n{0}'.format(e))
def arrange_plot_data(filename):
servers = ['server%s' % (n + 1) for n in range(15)]
df = pd.read_csv(filename, sep=';', names=['timestamp'] + servers)
df['sum'] = df.apply(calculate_sum, axis=1)
data = [plot_column(name, df) for name in servers + ['sum']]
return data
def calculate_sum(row):
return sum(row[1:])
def plot_column(name, df):
return go.Scatter(x=df.timestamp, y=df[name], name=name)
if __name__ == '__main__':
s = sched.scheduler()
periodic(s, 5 * 60, process_current_player_counts)
s.run()
| 3.015625
| 3
|
coders/curso_python/fundamentos_projeto/area_circulo_v2.py
|
flaviogf/Cursos
| 2
|
12782274
|
<filename>coders/curso_python/fundamentos_projeto/area_circulo_v2.py
#!/usr/local/bin/python3
from math import pi
raio = 15
area = pi * raio**2
print(f'Area {area}')
| 2.515625
| 3
|
config.py
|
dziaineka/insider_bot
| 7
|
12782275
|
from os import getenv
from os.path import join, dirname
from dotenv import load_dotenv
# Create .env file path.
dotenv_path = join(dirname(__file__), ".env")
# Load file from the path.
load_dotenv(dotenv_path)
BOT_TOKEN = getenv('BOT_TOKEN', "")
CHAT_NAME = getenv('CHAT_NAME', "")
INSIDE_CHANNEL = getenv('INSIDE_CHANNEL', "")
| 2.21875
| 2
|
scripts/print_cil_from_bytes.py
|
mandiant/dncil
| 0
|
12782276
|
<reponame>mandiant/dncil<gh_stars>0
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import argparse
from dncil.cil.body import reader
from dncil.cil.error import MethodBodyFormatError
def main(args):
with open(args.path, "rb") as f_in:
dn = f_in.read()
try:
dn_body = reader.read_method_body_from_bytes(dn)
except MethodBodyFormatError as e:
print(e)
return
for insn in dn_body.instructions:
print(insn)
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog="Print IL from the raw bytes of a managed method")
parser.add_argument("path", type=str, help="Full path to file containing raw bytes of managed method")
main(parser.parse_args())
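# Example invocation (the file name is hypothetical): running
#   python print_cil_from_bytes.py extracted_method_body.bin
# reads the raw bytes, parses them as a managed method body, and prints one
# decoded CIL instruction per line, or the format error if parsing fails.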
| 2.359375
| 2
|
config.py
|
Theocrat/konfsave
| 0
|
12782277
|
<reponame>Theocrat/konfsave
import os
from pathlib import Path
if (_config_home := os.path.expandvars('$XDG_CONFIG_HOME')) != '$XDG_CONFIG_HOME':
CONFIG_HOME = Path(_config_home)
else:
CONFIG_HOME = Path.home() / '.config'
KONFSAVE_DATA_PATH = CONFIG_HOME / 'konfsave'
KONFSAVE_PROFILE_HOME = KONFSAVE_DATA_PATH / 'profiles'
KONFSAVE_CURRENT_PROFILE_PATH = KONFSAVE_DATA_PATH / 'current_profile'
_XDG_CONFIG_PATHS_TO_SAVE = {
# These paths are relative to $XDG_CONFIG_HOME (~/.config).
'gtk-2.0',
'gtk-3.0',
'Kvantum',
'konfsave/config.py',
'dolphinrc',
'konsolerc',
'kcminputrc',
'kdeglobals',
'kglobalshortcutsrc',
'klipperrc',
'krunnerrc',
'kscreenlockerrc',
'ksmserverrc',
'kwinrc',
'kwinrulesrc',
'plasma-org.kde.plasma.desktop-appletsrc',
'plasmarc',
'plasmashellrc',
'gtkrc',
'gtkrc-2.0'
}
PATHS_TO_SAVE = set(map(lambda p: os.path.join(Path.home(), p), {
# These paths are relative to the home directory.
'.kde4'
})) | set(map(lambda p: os.path.join(CONFIG_HOME, p), _XDG_CONFIG_PATHS_TO_SAVE))
| 2.09375
| 2
|
day13/13.py
|
stefsmeets/advent_of_code
| 0
|
12782278
|
<gh_stars>0
import numpy as np
filename = 'data.txt'
with open(filename) as f:
lines = (line.strip() for line in f.readlines())
dots = []
folds = []
for line in lines:
if line.startswith('fold along'):
direction, line_no = line.split()[-1].split('=')
line_no = int(line_no)
folds.append((direction, line_no))
elif line:
dots.append([int(val) for val in line.split(',')])
dots = np.array(dots)
shape = dots.max(axis=0) + 1
grid = np.zeros(shape)
grid[dots[:,0], dots[:,1]] = 1
for i, (direction, line_no) in enumerate(folds):
if direction == 'y':
slice_1 = np.s_[:, :line_no]
slice_2 = np.s_[:, line_no+1:]
flip = np.fliplr
else:
slice_1 = np.s_[:line_no]
slice_2 = np.s_[line_no+1:]
flip = np.flipud
folded = flip(grid[slice_2])
grid = grid[slice_1]
if direction == 'y':
start = grid.shape[1] - folded.shape[1]
grid[:, start:] += folded
else:
start = grid.shape[0] - folded.shape[0]
grid[start:] += folded
if i == 0:
n_dots_first_fold = np.sum(grid>0)
print(f'part 1: {n_dots_first_fold=}')
import matplotlib.pyplot as plt
plt.imshow((grid>0).T)
plt.title('Part 2')
plt.show()
| 2.59375
| 3
|
Daily Python/18_StockPredictionLR/18_StockPredictionLR.py
|
Harjiwan/Python
| 17
|
12782279
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
#Import the data
data = pd.read_csv("TSLA.csv")
print('Raw data from Yahoo Finance : ')
print(data.head())
#Remove date and Adj Close columns
data = data.drop('Date',axis=1)
data = data.drop('Adj Close',axis = 1)
print('\n\nData after removing Date and Adj Close : ')
print(data.head())
#Split into train and test data
data_X = data.loc[:,data.columns != 'Close' ]
data_Y = data['Close']
train_X, test_X, train_y,test_y = train_test_split(data_X,data_Y,test_size=0.25)
print('\n\nTraining Set')
print(train_X.head())
print(train_y.head())
#Creating the Regressor
regressor = LinearRegression()
regressor.fit(train_X,train_y)
#Make Predictions and Evaluate them
predict_y = regressor.predict(test_X)
print('Prediction Score : ' , regressor.score(test_X,test_y))
error = mean_squared_error(test_y,predict_y)
print('Mean Squared Error : ',error)
#Plot the predicted and the expected values
fig = plt.figure()
ax = plt.axes()
ax.grid()
ax.set(xlabel='Open ($)', ylabel='Close ($)',
title='Tesla Stock Prediction using Linear Regression')
ax.plot(test_X['Open'],test_y)
ax.plot(test_X['Open'],predict_y)
fig.savefig('LRPlot.png')
plt.show()
| 3.609375
| 4
|
cobra_utils/__init__.py
|
earmingol/cobra_utils
| 1
|
12782280
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from cobra_utils import io
from cobra_utils import query
from cobra_utils import topology
__version__ = "0.3.1"
| 1.015625
| 1
|
students/K33402/Velts Andrey/lab0304/backend/events/urls.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
| 4
|
12782281
|
<filename>students/K33402/Velts Andrey/lab0304/backend/events/urls.py
from rest_framework.routers import DefaultRouter
from .views import EventViewSet
router = DefaultRouter()
router.register(r"", EventViewSet, basename="events")
urlpatterns = router.urls
| 1.710938
| 2
|
iscan/scan.py
|
ZhengnanZhao/importscanner
| 3
|
12782282
|
"""Utilities to scan all Python files in a directory and
aggregate the names of all the imported packages
"""
import argparse
import ast
import os
from collections import Counter
from typing import Dict, Iterable, List, Optional, Tuple
from iscan.std_lib import separate_third_party_from_std_lib
class ImportScanner(ast.NodeVisitor):
"""Scanner to look for imported packages."""
def __init__(self) -> None:
self.imports = [] # type: ignore
def visit_Import(self, node: ast.Import) -> None:
"""Extract imports of the form `import foo`.
>>> import_statement = 'import os.path.join as jn, datetime.datetime as dt'
>>> ast.dump(ast.parse(import_statement))
"Module(body=[
Import(names=[alias(name='os.path.join', asname='jn'),
alias(name='datetime.datetime', asname='dt')])
])"
"""
for alias in node.names:
self.imports.append(alias.name)
self.generic_visit(node)
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
"""Extract imports of the form `from foo import bar`.
Relative imports such as `from ..utils import foo` will be ignored.
>>> import_statement = 'from os.path import join as jn, split'
>>> ast.dump(ast.parse(import_statement))
"Module(body=[
ImportFrom(module='os.path',
names=[alias(name='join', asname='jn'),
alias(name='split', asname=None)],
level=0)
])"
"""
# Ignore relative imports, for which node.level > 0
# E.g., `from ..utils import foo` has a node.level of 2
if node.level == 0:
self.imports.append(node.module)
self.generic_visit(node)
def get_imports(self) -> List[str]:
return sorted(self.imports)
def convert_source_to_tree(fpath: str) -> ast.Module:
"""Convert source code into abstract syntax tree.
Args:
fpath: Path to the Python file of interest
Returns:
AST representation of the source code
"""
with open(fpath, 'r') as f:
tree = ast.parse(f.read())
return tree
def scan_directory(dir_to_scan: str, dir_to_exclude: Optional[str] = None) -> List[str]:
"""Extract packages imported across all Python files in a directory.
Args:
dir_to_scan: Path to the directory of interest
dir_to_exclude: Path to the directory to be excluded during scanning
Returns:
Imported packages; might contain duplicates
"""
all_imports = []
for root_dir, _, fnames in os.walk(top=dir_to_scan):
# Skip excluded directory
if dir_to_exclude is not None:
if os.path.abspath(dir_to_exclude) in os.path.abspath(root_dir):
continue
for fname in fnames:
# Skip non-Python files
if not fname.endswith('.py'):
continue
# Convert source code into tree
fpath = os.path.join(root_dir, fname)
tree = convert_source_to_tree(fpath)
# Extract imports for current file
scanner = ImportScanner()
scanner.visit(tree)
all_imports.extend(scanner.get_imports())
return all_imports
def get_base_name(full_name: str) -> str:
"""Extract the base name of a package.
Args:
full_name: Full name of the package of interest, e.g., pandas.testing
Returns:
Base name of the provided package, e.g., pandas
"""
return full_name.split('.')[0]
def sort_counter(counter: Counter, alphabetical: bool) -> Dict[str, int]:
"""Sort counter according to custom logic.
Args:
counter: Imported packages and their corresponding count
alphabetical: Whether to sort counter alphabetically
Returns:
Sorted counter
"""
def custom_order(tup):
# Sort first by count (descending), and then by name
return -tup[1], tup[0]
sort_key = None if alphabetical else custom_order
return dict(sorted(counter.items(), key=sort_key))
def show_result(third_party: Dict[str, int], std_lib: Dict[str, int], ignore_std_lib: bool) -> None:
"""Print the result of running iscan.
Args:
third_party: Imported third-party packages and count
std_lib: Imported standard library modules and count
ignore_std_lib: Whether to omit standard library modules in the output
"""
result = '''
--------------------------
Third-party packages
--------------------------
NAME COUNT
'''
for name, count in third_party.items():
result += f'{name:<20} {count:>5}\n'
if not ignore_std_lib:
result += '''
--------------------------
Standard library modules
--------------------------
NAME COUNT
'''
for name, count in std_lib.items():
result += f'{name:<20} {count:>5}\n'
print(result)
def run(dir_to_scan: str, dir_to_exclude: Optional[str] = None) -> Tuple[Counter, Counter]:
"""Run iscan for a given set of parameters.
Args:
dir_to_scan: Path to the directory of interest
dir_to_exclude: Path to the directory to be excluded during scanning
Returns:
Imported third-party packages and count
Imported standard library modules and count
"""
full_packages = scan_directory(dir_to_scan, dir_to_exclude)
base_packages = map(get_base_name, full_packages)
third_party, std_lib = separate_third_party_from_std_lib(base_packages)
return Counter(third_party), Counter(std_lib)
def cli() -> argparse.Namespace:
"""Command line interface."""
parser = argparse.ArgumentParser(
allow_abbrev=False,
description='Aggregate third-party packages and standard library modules imported across all Python files in a given directory.' # noqa: E501
)
parser.add_argument(
'DIR_TO_SCAN',
help='target directory to scan'
)
parser.add_argument(
'-x',
default=None,
dest='DIR_TO_EXCLUDE',
help='directory to exclude during scanning'
)
parser.add_argument(
'--ignore-std-lib',
dest='IGNORE_STD_LIB',
action='store_const',
const=True,
default=False,
help='whether to leave standard library modules out of the report'
)
parser.add_argument(
'--alphabetical',
dest='ALPHABETICAL',
action='store_const',
const=True,
default=False,
help='whether to sort the report alphabetically'
)
return parser.parse_args()
def main() -> None:
args = cli()
third_party, std_lib = run(args.DIR_TO_SCAN, args.DIR_TO_EXCLUDE)
third_party = sort_counter(third_party, args.ALPHABETICAL) # type: ignore
std_lib = sort_counter(std_lib, args.ALPHABETICAL) # type: ignore
show_result(third_party, std_lib, args.IGNORE_STD_LIB)
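# Illustrative programmatic use (paths are hypothetical): calling
#   third_party, std_lib = run('/path/to/project', dir_to_exclude='/path/to/project/tests')
# returns two Counters, e.g. Counter({'requests': 3, 'numpy': 1}) for
# third-party packages and Counter({'os': 5, 'sys': 2}) for standard library
# modules, which sort_counter() and show_result() then format for printing.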
| 2.921875
| 3
|
tools/dev/iamdb.py
|
chris-angeli-rft/cloud-custodian
| 8
|
12782283
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
URL = "https://awspolicygen.s3.amazonaws.com/js/policies.js"
def main():
raw_data = requests.get(URL).text
data = json.loads(raw_data[raw_data.find('=') + 1:])
perms = {}
for _, svc in data['serviceMap'].items():
perms[svc['StringPrefix']] = svc['Actions']
sorted_perms = {}
for k in sorted(perms):
sorted_perms[k] = sorted(perms[k])
with open('iam-permissions.json', 'w') as fh:
json.dump(sorted_perms, fp=fh, indent=2)
if __name__ == '__main__':
main()
| 2.109375
| 2
|
utils/steal-puzzles.py
|
jpverkamp/takuzu
| 1
|
12782284
|
<filename>utils/steal-puzzles.py
import bs4
import os
import requests
import sys
for size in [6, 8, 10, 12, 14]:
for level in [1, 2, 3, 4]:
nr = 0
while True:
nr += 1
response = requests.get('http://www.binarypuzzle.com/puzzles.php', params = {
'level': level,
'size': size,
'nr': nr
})
soup = bs4.BeautifulSoup(response.text, 'lxml')
# If we're more than the number of options, skip
puzzle_count = len(list(soup.find('select', {'name': 'nr'}).find_all('option')))
if nr > puzzle_count:
break
# Get the raw values as a single list
values = [
cel.text.strip() or '.'
for cel in soup.find_all('div', {'class': 'puzzlecel'})
]
path = os.path.join(
'..',
'puzzles',
'{size}x{size}'.format(size = size),
[None, 'easy', 'medium', 'hard', 'very-hard'][level],
'{nr:03d}.takuzu'.format(nr = nr)
)
print(path)
try:
os.makedirs(os.path.dirname(path))
except:
pass
with open(path, 'w') as fout:
for row in range(size):
fout.write(''.join(values[row * size : (row + 1) * size]))
fout.write('\n')
| 2.671875
| 3
|
filter-hitl-language/docai_utils.py
|
galz10/document-ai-samples
| 3
|
12782285
|
<filename>filter-hitl-language/docai_utils.py<gh_stars>1-10
"""
Document AI Functions
"""
from collections import defaultdict
from typing import Any
from google.cloud import documentai_v1 as documentai
from gcs_utils import (
get_files_from_gcs,
get_all_buckets,
create_bucket,
move_file,
)
UNDEFINED_LANGUAGE = "und"
def sort_document_files_by_language(
gcs_input_bucket: str, gcs_input_prefix: str, gcs_output_bucket: str
) -> None:
"""
Move files between buckets based on language
"""
blobs = get_files_from_gcs(gcs_input_bucket, gcs_input_prefix)
buckets = get_all_buckets()
# Output Document.json Files
for blob in blobs:
if ".json" not in blob.name:
print(f"Skipping non-supported file type {blob.name}")
continue
print(f"Downloading {blob.name}")
document = documentai.types.Document.from_json(
blob.download_as_bytes(), ignore_unknown_fields=True
)
# Find the most frequent language in the document
predominant_language = get_most_frequent_language(document)
print(f"Predominant Language: {predominant_language}")
# Create the output bucket if it does not exist
language_bucket_name = f"{gcs_output_bucket}{predominant_language}"
if language_bucket_name not in buckets:
print(f"Creating bucket {language_bucket_name}")
create_bucket(language_bucket_name)
buckets.add(language_bucket_name)
# Move Document.json file to bucket based on language
move_file(gcs_input_bucket, blob.name, language_bucket_name)
def get_most_frequent_language(document: documentai.Document) -> str:
"""
Returns the most frequent language in the document
"""
language_frequency: defaultdict[Any, int] = defaultdict(int)
for page in document.pages:
for language in page.detected_languages:
if language.language_code == UNDEFINED_LANGUAGE or (
language.confidence and language.confidence < 0.5
):
continue
language_frequency[language.language_code] += 1
return max(
language_frequency, key=language_frequency.get, default=UNDEFINED_LANGUAGE # type: ignore
)
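# Illustrative call (bucket names and prefix are made up): running
#   sort_document_files_by_language("docai-input", "processed/", "docai-by-lang-")
# downloads each Document.json under gs://docai-input/processed/, finds its
# predominant language, and moves it into e.g. "docai-by-lang-fr", creating
# that bucket first if it does not already exist.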
| 2.671875
| 3
|
Dynamic-programming/minimum_string_edit.py
|
kimjiwook0129/Coding-Interivew-Cheatsheet
| 3
|
12782286
|
# Minimum operations needed to make A to B
# Insert, Remove, Replace Available
A, B = input(), input()
dp = [[0] * (len(B) + 1) for _ in range(len(A) + 1)]
for i in range(1, len(A) + 1):
dp[i][0] = i
for j in range(1, len(B) + 1):
dp[0][j] = j
for i in range(1, len(A) + 1):
for j in range(1, len(B) + 1):
if A[i - 1] == B[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + min(dp[i - 1][j], dp[i - 1][j - 1], dp[i][j - 1])
print(dp[len(A)][len(B)])
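# Worked example (illustrative, not from the original input): A = "kitten",
# B = "sitting" fills the table so that dp[6][7] == 3 (replace k->s,
# replace e->i, insert g), the classic Levenshtein distance.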
| 3
| 3
|
pyrestorm/paginators.py
|
alanjds/py-rest-orm
| 9
|
12782287
|
class RestPaginator(object):
'''Base paginator class which provides method templates.
'''
def __init__(self, page_size=20, **kwargs):
# Maximum number of elements expected to be returned. If None, max will be intelligently determined
self.max = kwargs.get('max', None)
# Current location of the cursor in the queryset
self.position = 0
        # How many records should be retrieved per request
self.page_size = page_size
def next(self):
'''Advances the cursor to the next valid location, if available.
Returns:
bool: True if successful, otherwise False.
'''
raise NotImplementedError
def prev(self):
'''Advances the cursor to the previous valid location, if available.
Returns:
bool: True if successful, otherwise False.
'''
raise NotImplementedError
def cursor(self, *args, **kwargs):
'''Moves the cursor to a specified position in the queryset.
Args:
position (int): What index of the queryset to seek to?
Returns:
            bool: True if the cursor's position changed
'''
position = getattr(self, 'position', 0)
# Check for the 'required' position argument, move the cursor if provided
if len(args) == 1 and args[0] >= 0:
position = args[0]
        # Determine whether the cursor actually moved, then update it
        cursor_moved = (position != self.position)
        self.position = position
return cursor_moved
def set_max(self, maximum):
'''Sets the maximum range of the paginator.
'''
self.max = maximum
def as_params(self):
'''Converts attributes needed for URL encoding to **kwargs.
Returns:
dict: Key-value pairs for variables of the class instance.
'''
return {}
class DjangoRestFrameworkLimitOffsetPaginator(RestPaginator):
def __init__(self, limit=20, **kwargs):
# Parameter renaming
return super(DjangoRestFrameworkLimitOffsetPaginator, self).__init__(page_size=limit, **kwargs)
    # Retrieved is meant to educate the paginator on the number of results retrieved by the last request
def next(self):
if not self.page_size or not self.max:
return False
# If we don't know how many records there are, and we retrieved a full page last request, next could exist
# Or if advancing doesn't bring us past the known end
elif self.position + self.page_size <= self.max:
self.position += self.page_size
return True
return False
    # Underflow logic is much simpler since the start is a known position
def prev(self):
# Can't go any further back than the beginning
if self.position == 0:
return False
# If we will overshoot the beginning, floor to 0 index
elif self.position - self.page_size <= 0:
self.position = 0
# There is definitely enough room to go back
else:
self.position -= self.page_size
return True
def cursor(self, *args, **kwargs):
super(DjangoRestFrameworkLimitOffsetPaginator, self).cursor(*args, **kwargs)
self.page_size = kwargs.get('limit', self.page_size)
# Extract the number of results from the response
def set_max(self, response):
if self.max is None:
return super(DjangoRestFrameworkLimitOffsetPaginator, self).set_max(response['count'])
# Dictionary of URL params for pagination
def as_params(self):
params = {'offset': unicode(self.position)}
if self.page_size is not None:
params['limit'] = unicode(self.page_size)
return params
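# Minimal usage sketch (not part of the original module): walk a result set of
# 45 records in pages of 20. The caller is expected to feed the first API
# response into set_max() so the paginator learns where the end is.
if __name__ == '__main__':
    paginator = DjangoRestFrameworkLimitOffsetPaginator(limit=20)
    paginator.set_max({'count': 45})   # e.g. parsed from the first JSON response
    offsets = [paginator.position]
    while paginator.next():
        offsets.append(paginator.position)
    print(offsets)                     # [0, 20, 40]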
| 2.71875
| 3
|
utils/hashing.py
|
omgthatsjackie/keeper
| 1
|
12782288
|
from hashlib import pbkdf2_hmac
salt = b'<PASSWORD>'
def hash_password(password):
return pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000).hex()[0:50]
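# Minimal usage sketch (illustrative only): the hash is deterministic for the
# fixed salt above, so a password is verified by re-deriving the hash and
# comparing it in constant time.
if __name__ == '__main__':
    import hmac
    stored = hash_password('correct horse battery staple')
    assert hmac.compare_digest(stored, hash_password('correct horse battery staple'))
    assert not hmac.compare_digest(stored, hash_password('wrong password'))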
| 2.75
| 3
|
code/gomap_setup.py
|
bioinformapping/GOMAP
| 0
|
12782289
|
#!/usr/bin/env python2
'''
This submodule lets the user download the data files necessary for running the GOMAP pipeline from CyVerse.
Currently the files are stored in Gokul's personal directory, so the download has to be initiated by Gokul's own CyVerse account with icommands.
'''
import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil
from pprint import pprint
from code.utils.basic_utils import check_output_and_run
import tarfile
cyverse_path="i:/iplant/home/shared/dillpicl/gomap/GOMAP-data/"
from code.utils.logging_utils import setlogging
def setup(config):
setlogging(config,"setup")
"""
setup(config)
    This function downloads the **GOMAP-data.tar.gz** directory from CyVerse and extracts the content to the **data** directory. The steps run by this function are given below:
    1. Sync the GOMAP data directory from CyVerse using irsync.
    2. Decompress the gzipped files listed in data/compress_files.txt.
    3. Extract the tar archives listed in data/tar_files.txt.
Parameters
----------
config : dict
The config dict generated in the gomap.py script.
"""
outdir="data/"
cmd = ["irsync","-rsv",cyverse_path,outdir]
logging.info("Downloading file from Cyverse using irsync")
    # irsync will checksum the files on both ends to determine whether the download is necessary, and will only download if it is
    # this check may take some time before any files are actually downloaded
print(os.getcwd())
print(" ".join(cmd))
check_output_and_run("outfile",cmd)
with open("data/compress_files.txt","r") as comp_files:
counter=0
for infile in comp_files.readlines():
counter=counter+1
outfile = outdir+infile.strip()
gzfile = outdir+infile.strip()+".gz"
if os.path.exists(gzfile):
if os.path.exists(outfile):
print( gzfile + " already extracted")
else:
print("Extracting " + gzfile)
with gzip.open(gzfile,"rb") as in_f:
with open(outfile,"wb") as out_f:
shutil.copyfileobj(in_f,out_f)
os.remove(gzfile)
else:
print(gzfile + " doesn't exist")
with open("data/tar_files.txt","r") as comp_files:
for infile in comp_files.readlines():
infile=infile.strip()
outfile = outdir+infile.strip()
tar_f = outdir+infile.strip()+".tar.gz"
base_dir=os.path.basename(outfile)
if os.path.exists(tar_f):
if os.path.exists(outfile):
print(tar_f + " already extracted")
else:
print("Extracting " + tar_f)
with tarfile.open(tar_f) as tar:
tar.extractall("data/")
os.remove(tar_f)
else:
print(tar_f + " doesn't exist")
| 2.625
| 3
|
bin/get_html.py
|
ickc/pocket-export
| 1
|
12782290
|
#!/usr/bin/env python
import argparse
from functools import partial
from pathlib import Path
from requests_futures.sessions import FuturesSession
import pandas as pd
import numpy as np
# see https://stackoverflow.com/a/50039149
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (110000, 110000))
__version__ = '0.3'
HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'
}
def get_html(response, verbose=False):
try:
result = response.result()
if verbose:
print('Response from {} has status code {}.'.format(result.url, result.status_code))
assert result.status_code // 100 == 2
return result.content.decode()
except:
if verbose:
            print('Error occurred for {}'.format(response))
return None
def get_htmls(urls, max_workers=8, verbose=False, timeout=60):
session = FuturesSession(max_workers=max_workers)
if verbose:
n = len(urls)
print('Submitting {} jobs...'.format(n))
responses = [session.get(url, headers=HEADERS, timeout=timeout) for url in urls]
if verbose:
print('Executing {} jobs...'.format(n))
# if verbose, run a for loop to show progress explicitly
if verbose:
result = []
for i, response in enumerate(responses):
print('{} done, {} to go...'.format(i, n - i))
result.append(get_html(response, verbose=verbose))
return result
else:
return [get_html(response, verbose=verbose) for response in responses]
def get_htmls_archive(urls, max_workers=8, verbose=False, timeout=60):
urls = ['https://web.archive.org/web/' + url for url in urls]
return get_htmls(urls, max_workers=max_workers, verbose=verbose, timeout=timeout)
def main(path, output, verbose, worker, timeout):
df = pd.read_hdf(path)
    # if the output file already exists, update it:
if Path(output).is_file():
df_old = pd.read_hdf(output)
# merging dfs
df_merged = df.merge(df_old[['html']], how='outer', left_index=True, right_index=True)
df = df_merged
        # merging might have changed the order
df.sort_values('time_added', inplace=True)
na_idx = df.html.isna()
n = np.count_nonzero(na_idx)
print('{} out of {} urls are new, fetching...'.format(n, df.shape[0]))
# fetch html
n_workers = worker if worker else n
df.loc[na_idx, 'html'] = get_htmls(df[na_idx].index, max_workers=n_workers, verbose=verbose, timeout=timeout)
else:
n = df.shape[0]
print('{} urls to fetch...'.format(n))
n_workers = worker if worker else n
df['html'] = get_htmls(df.index, max_workers=n_workers, verbose=verbose, timeout=timeout)
# no response
df['archive'] = df.html.isna()
n = np.count_nonzero(df.archive)
print('{} out of {} urls cannot be fetched, try fetching from archive.org...'.format(n, df.shape[0]))
n_workers = worker if worker else n
df.loc[df.archive, 'html'] = get_htmls_archive(df[df.archive].index, max_workers=n_workers, verbose=verbose, timeout=timeout)
df.to_hdf(
output,
'df',
format='table',
complevel=9,
)
def cli():
parser = argparse.ArgumentParser(description="Save url content in HDF5.")
parser.add_argument('input', help='Input urls in HDF5.')
parser.add_argument('-o', '--output', help='Output HDF5. Update file if exists.')
parser.add_argument('-p', '--worker', type=int,
help='No. of workers used. If not specified, use as many as needed.')
parser.add_argument('-t', '--timeout', type=float, default=60.,
help='Timeout specified for requests. Default: 60.')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {}'.format(__version__))
parser.add_argument('-V', '--verbose', action='store_true',
help='verbose to stdout.')
args = parser.parse_args()
main(args.input, args.output, args.verbose, args.worker, args.timeout)
if __name__ == "__main__":
cli()
| 2.65625
| 3
|
phyllo/extractors/asconiusDB.py
|
oudalab/phyllo
| 0
|
12782291
|
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# Note: The original ordering of chapters and verses was extremely complex.
# As a result, chapters are the bold headers and subsections are each p tag.
# Case 1: Sections split by numbers (Roman or not) followed by a period, or bracketed. Subsections split by <p> tags
def parsecase1(ptags, c, colltitle, title, author, date, URL):
# ptags contains all <p> tags. c is the cursor object.
chapter = '-1'
verse = 0
for p in ptags:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                'internal_navigation']:  # these are not part of the main text
continue
except:
pass
passage = ''
text = p.get_text().strip()
# Skip empty paragraphs. and skip the last part with the collection link.
if len(text) <= 0 or text.startswith('Asconius\n'):
continue
chapterb = p.find('b')
if chapterb is not None and text[0].isalpha():
test = chapterb.find(text = True)
if text == test:
chapter = text
verse = 0
continue
passage = text
        verse += 1
if passage.startswith('Asconius'):
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, passage.strip(), URL, 'prose'))
def main():
collURL = 'http://www.thelatinlibrary.com/asconius.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.strip()
colltitle = 'QUINTUS ASCONIUS PEDIANUS'
date = 'c. 9 B.C. - c. A.D. 76'
textsURL = [collURL]
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author='Asconius'")
for url in textsURL:
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
try:
title = textsoup.title.string.split(':')[1].strip()
except:
title = textsoup.title.string.strip()
getp = textsoup.find_all('p')
parsecase1(getp, c, colltitle, title, author, date, url)
logger.info("Program runs successfully.")
if __name__ == '__main__':
main()
| 3.171875
| 3
|
cctbx/examples/merging/__init__.py
|
hbrunie/cctbx_project
| 2
|
12782292
|
<gh_stars>1-10
from __future__ import absolute_import, division, print_function
from scitbx.examples import bevington # import dependency
import boost.python
ext = boost.python.import_ext("cctbx_large_scale_merging_ext")
from cctbx_large_scale_merging_ext import *
| 1.132813
| 1
|
main.py
|
SuperSystemStudio/Cleanning
| 0
|
12782293
|
import configparser
import os
import sys
# Use a raw string so the backslashes in the Windows path are not treated as escape sequences
CACHE_DIR = r"C:\Users\Lenovo\AppData\Local\kingsoft\WPS Cloud Files\userdata\qing\filecache"

config = configparser.ConfigParser()
config.read("path.ini")

# If path.ini has no [path] section yet, record the WPS file cache sub-directories in it.
# Each directory is stored under its own option so multiple paths can be kept
# (the original reused a single key, which only kept the last path).
if not config.has_section("path"):
    config.add_section("path")
    # os.walk yields (dirpath, dirnames, filenames) tuples
    for dirpath, dirnames, filenames in os.walk(CACHE_DIR):
        name = os.path.basename(dirpath)
        if name not in (".3172735", "configbackup"):
            config.set("path", name, dirpath)
    with open("path.ini", "w") as ini:
        config.write(ini)

# "clean" mode: remove every directory recorded in the [path] section
if len(sys.argv) > 1 and sys.argv[1] == "clean":
    for option in config.options("path"):
        os.removedirs(config.get("path", option))
| 2.265625
| 2
|
kratos/lib.py
|
IanBoyanZhang/kratos
| 39
|
12782294
|
import _kratos
from .generator import Generator
class SinglePortSRAM(Generator):
def __init__(self, macro_name: str, data_width: int, addr_width: int,
partial_write: bool = False, is_clone=False, sram_def=None):
if sram_def is None:
self.sram = _kratos.lib.SinglePortSRAM(Generator.get_context(),
macro_name,
addr_width, data_width,
partial_write)
else:
self.sram = sram_def
Generator.__init__(self, macro_name, is_clone=is_clone,
internal_generator=self.sram)
# proxy for properties
@property
def num_ports(self):
return self.sram.num_ports
@property
def addr_width(self):
return self.sram.addr_width
@property
def data_width(self):
return self.sram.data_width
@property
def capacity(self):
return self.sram.capacity()
# ports
# to change the name
# simply rename the port, e.g. sram.output_data.name = "Q_DATA"
@property
def output_data(self):
return self.sram.output_data
@property
def chip_enable(self):
return self.sram.chip_enable
@property
def write_enable(self):
return self.sram.write_enable
@property
def addr(self):
return self.sram.addr
@property
def input_data(self):
return self.sram.input_data
@property
def partial_write_mask(self):
        return self.sram.partial_write_mask
def bank_sram(generator_name, capacity, sram_def):
if isinstance(sram_def, SinglePortSRAM):
sram_def = sram_def.sram
else:
assert isinstance(sram_def, _kratos.lib.SinglePortSRAM)
sram = _kratos.lib.SinglePortSRAM(Generator.get_context(), generator_name,
capacity, sram_def)
# allow nested sram banks
return SinglePortSRAM(generator_name, sram.data_width, sram.addr_width,
sram.partial_write, False, sram_def=sram)
| 2.40625
| 2
|
src/input/telnet.py
|
fufuok/PyAgent
| 2
|
12782295
|
# -*- coding:utf-8 -*-
"""
telnet.py
~~~~~~~~
Data collection plugin - port check
:author: Fufu, 2021/6/16
"""
from asyncio import ensure_future
from typing import Union
from . import InputPlugin
from ..libs.helper import get_dict_value
from ..libs.metric import Metric
from ..libs.net import chk_port
class Telnet(InputPlugin):
"""端口检测数据收集插件"""
# 模块名称
name = 'telnet'
async def gather(self) -> None:
"""获取数据(允许堆叠)"""
await self.perf_gather()
async def run_gather(self) -> None:
"""获取数据"""
tasks = []
for tag, conf in self.get_plugin_conf_value('target', {}).items():
address = get_dict_value(conf, 'address', '').strip()
if address:
as_ipv6 = get_dict_value(conf, 'ipv6', False)
timeout = get_dict_value(conf, 'timeout', 5)
tasks.append(ensure_future(self.run_telnet(tag, address, as_ipv6, timeout)))
        # Wait for the tasks to run
tasks and await self.run_tasks(tasks)
async def run_telnet(
self,
tag: str,
address: Union[str, tuple, list],
as_ipv6: bool = False,
timeout: int = 5,
) -> Metric:
"""执行检测并发送结果"""
yes, errcode = await self.to_thread(chk_port, address, None, as_ipv6, timeout)
metric = self.metric({
'tag': tag,
'address': address,
'as_ipv6': as_ipv6,
'timeout': timeout,
'yes': yes,
'errcode': errcode,
})
return metric
| 2.203125
| 2
|
accounts/views.py
|
rafaellima47/to-do-list-django
| 0
|
12782296
|
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import auth
def login(request):
context = {}
if request.method == "POST":
user = auth.authenticate(username=request.POST["email"], password=request.POST["password"])
if user is not None:
auth.login(request, user)
return redirect("home")
else:
context["error"] = "Email or Password incorrect."
return render(request, "accounts/login.html", context)
def signup(request):
context = {}
if request.method == "POST":
        # Check if the password and the confirmation are the same
        if request.POST["password1"] == request.POST["password2"]:
            # Check if the email has already been used
try:
user = User.objects.get(email=request.POST["email"])
context["error"] = "This email is already registred."
except User.DoesNotExist:
user = User.objects.create_user(request.POST["email"], email=request.POST["email"], password=request.POST["<PASSWORD>"])
auth.login(request, user)
return redirect("home")
else:
context["error"] = "Passwords must match."
return render(request, "accounts/signup.html", context)
def logout(request):
auth.logout(request)
return redirect("login")
| 2.53125
| 3
|
a12_freq.py
|
yqelcodes/FTD_work
| 0
|
12782297
|
import collections
import pandas as pd
big_list = [[{'автопродление': 1},
{'аккаунт': 1},
{'акция': 2},
{'безумный': 1},
{'бесплатно': 1},
{'бесплатнои': 1},
{'бесплатныи': 1},
{'бесплатный': 1},
{'бесценок': 1},
{'билет': 2},
{'бритва': 1},
{'бритвеныи': 1},
{'важный': 2},
{'вводить': 1},
{'деиствует': 1},
{'забудь': 1},
{'заполнять': 1},
{'заходить': 1},
{'заявка': 1},
{'идти': 1},
{'канал': 1},
{'карта': 1},
{'кино': 2},
{'кинопоиск': 1},
{'ленись': 1},
{'наидете': 1},
{'неделя': 1},
{'новыи': 1},
{'отключить': 1},
{'пара': 1},
{'первый': 1},
{'переходить': 1},
{'подписка': 2},
{'подписываися': 1},
{'покупка': 2},
{'покупке': 1},
{'получать': 1},
{'получение': 1},
{'почту': 1},
{'премиум': 1},
{'привязывать': 1},
{'прийти': 1},
{'промо': 1},
{'промокоду': 1},
{'регистрировать': 1},
{'регистрируемся': 1},
{'саит': 1},
{'сеичас': 1},
{'скидка': 2},
{'совершенно': 1},
{'станок': 1},
{'телеграм': 1},
{'экономить': 1}],
[{'неделя': 1},
{'получать': 1},
{'саит': 1},
{'скидка': 6},
{'автоматически': 1},
{'антивирус': 1},
{'антивирусы': 1},
{'бит': 1},
{'возможность': 1},
{'временной': 1},
{'выбрать': 1},
{'даваите': 1},
{'деиствительно': 1},
{'деиствия': 1},
{'деиствовать': 1},
{'дополнительнои': 1},
{'дополнительный': 1},
{'других': 1},
{'другое': 1},
{'ждать': 1},
{'запись': 1},
{'запустить': 1},
{'защитный': 1},
{'использовать': 1},
{'ключ': 2},
{'код': 3},
{'компьютер': 1},
{'мочь': 1},
{'наиболее': 1},
{'новость': 1},
{'обеспечение': 4},
{'обновить': 1},
{'ограничить': 2},
{'отличный': 1},
{'парк': 1},
{'планировать': 1},
{'полугодовой': 1},
{'получить': 1},
{'популярный': 1},
{'посмотреть': 1},
{'предложение': 1},
{'применение': 1},
{'программный': 4},
{'продукт': 2},
{'распродажа': 2},
{'саите': 1},
{'скидкои': 1},
{'следующии': 1},
{'следующий': 1},
{'снижение': 1},
{'специальный': 1},
{'срок': 1},
{'супер': 2},
{'течение': 1},
{'упустить': 1},
{'устроиств': 1},
{'устроиства': 1},
{'учётный': 1},
{'хотеть': 1},
{'цена': 9}],
[{'наидете': 1},
{'неделя': 1},
{'первый': 2},
{'скидка': 4},
{'деиствительно': 2},
{'других': 1},
{'предложение': 2},
{'распродажа': 2},
{'снижение': 1},
{'цена': 5},
{'instagram': 1},
{'twitter': 1},
{'большинство': 1},
{'бренд': 1},
{'верить': 1},
{'вернее': 1},
{'вид': 1},
{'видео': 2},
{'витрина': 1},
{'витринный': 1},
{'выгодный': 1},
{'гарантию': 1},
{'делать': 1},
{'день': 1},
{'диктофон': 1},
{'другои': 1},
{'жж': 1},
{'закрываться': 2},
{'интересный': 1},
{'каждыи': 1},
{'количество': 1},
{'кстати': 1},
{'купить': 1},
{'логотип': 1},
{'магазин': 2},
{'маркет': 1},
{'медиамаркт': 1},
{'наидется': 1},
{'наидутся': 1},
{'например': 1},
{'находиться': 1},
{'небольшой': 3},
{'недавно': 1},
{'низкий': 2},
{'обещать': 2},
{'обман': 1},
{'общий': 1},
{'остаться': 2},
{'осуществлять': 1},
{'пестреть': 1},
{'писать': 1},
{'повыбирать': 1},
{'позиция': 1},
{'понадобиться': 1},
{'посетителеи': 1},
{'правда': 1},
{'правильно': 1},
{'продавать': 1},
{'производитель': 1},
{'размер': 1},
{'распродажный': 1},
{'рекламировать': 1},
{'связь': 1},
{'сервис': 1},
{'скореи': 1},
{'случай': 4},
{'случиться': 1},
{'сменить': 1},
{'смотреть': 1},
{'событие': 1},
{'сообщение': 1},
{'сообщить': 1},
{'соцсеть': 2},
{'сравниваите': 1},
{'сравнивать': 1},
{'старт': 1},
{'существенно': 1},
{'товар': 2},
{'трансляция': 2},
{'тщательно': 1},
{'увеличивать': 1},
{'уменьшаться': 1},
{'уникальныи': 1},
{'финальный': 1},
{'ходовой': 1},
{'центр': 1},
{'экземпляр': 1}],
[{'покупка': 1},
{'выбрать': 1},
{'продукт': 1},
{'саите': 2},
{'магазин': 1},
{'сервис': 1},
{'товар': 3},
{'уникальныи': 1},
{'брать': 2},
{'выбор': 1},
{'выкуп': 1},
{'груз': 1},
{'днеи': 1},
{'забота': 2},
{'заказ': 2},
{'заниматься': 1},
{'интернет': 3},
{'каталог': 2},
{'категория': 1},
{'мелко': 1},
{'мск': 1},
{'набор': 2},
{'нужный': 1},
{'объединение': 1},
{'оставить': 1},
{'остальные': 1},
{'откроить': 1},
{'оформление': 1},
{'параметр': 1},
{'перепаковке': 1},
{'подарочныи': 1},
{'подарочный': 1},
{'поддержка': 1},
{'полностью': 1},
{'полныи': 1},
{'посылка': 1},
{'праздничный': 1},
{'разный': 1},
{'сделать': 1},
{'служба': 1},
{'соблюдение': 1},
{'собрать': 1},
{'ссылка': 1},
{'таможенный': 1},
{'телефон': 1},
{'требовании': 1},
{'удобныи': 1},
{'указание': 1},
{'шопинг': 1}],
[{'канал': 1},
{'мочь': 1},
{'цена': 1},
{'видео': 1},
{'смотреть': 1},
{'товар': 4},
{'ссылка': 1},
{'безусловно': 1},
{'большои': 1},
{'боцманскии': 1},
{'вариант': 1},
{'внутренний': 1},
{'военнои': 1},
{'возможный': 1},
{'входить': 1},
{'глаз': 1},
{'дерево': 1},
{'довольно': 1},
{'доступный': 1},
{'друг': 1},
{'жми': 1},
{'защёлка': 1},
{'иметь': 2},
{'инструмент': 1},
{'карман': 1},
{'классный': 1},
{'кольцо': 1},
{'комплект': 1},
{'которои': 1},
{'крепление': 1},
{'крутой': 2},
{'лезвие': 1},
{'марлина': 1},
{'металического': 1},
{'металом': 1},
{'модификациеи': 1},
{'молния': 1},
{'морской': 1},
{'мужик': 1},
{'мужчик': 1},
{'наидет': 1},
{'наити': 1},
{'найти': 1},
{'накладка': 1},
{'наличие': 1},
{'настоящий': 1},
{'начать': 1},
{'нежелательный': 1},
{'необходимый': 1},
{'нержавеики': 1},
{'нож': 2},
{'основнои': 1},
{'основный': 1},
{'особенность': 1},
{'отличительнои': 1},
{'палированным': 1},
{'пластик': 1},
{'поддеть': 1},
{'популярнои': 1},
{'потаиным': 1},
{'поэтому': 1},
{'правило': 1},
{'представлять': 1},
{'преимущество': 1},
{'привет': 1},
{'простота': 1},
{'работа': 1},
{'ремень': 6},
{'ремня': 1},
{'рукоятка': 1},
{'самое': 1},
{'связке': 1},
{'складный': 1},
{'слишком': 1},
{'смочь': 1},
{'собои': 1},
{'сокровенный': 1},
{'статья': 1},
{'страховочный': 1},
{'таиника': 1},
{'таиником': 1},
{'такои': 1},
{'твёрдый': 1},
{'тканевыи': 1},
{'толстыи': 1},
{'топчик': 1},
{'увидеть': 1},
{'узел': 1},
{'часть': 1},
{'шип': 1},
{'являться': 2}],
[{'канал': 1},
{'покупка': 1},
{'сеичас': 1},
{'скидка': 5},
{'других': 1},
{'супер': 1},
{'товар': 3},
{'нужный': 1},
{'подарочныи': 1},
{'подарочный': 1},
{'разный': 1},
{'ремень': 1},
{'барсучий': 1},
{'благородный': 1},
{'больший': 1},
{'бритьё': 1},
{'быстрый': 1},
{'восторженный': 1},
{'вставка': 1},
{'выделка': 1},
{'выполнить': 1},
{'высокий': 1},
{'год': 1},
{'двоиными': 1},
{'длина': 1},
{'добавить': 1},
{'документ': 1},
{'доставка': 1},
{'древесина': 1},
{'дужки': 1},
{'зажимами': 1},
{'защитои': 1},
{'зеркальный': 1},
{'изготовить': 1},
{'исполнение': 1},
{'качество': 1},
{'кисть': 2},
{'клапанах': 1},
{'ключеи': 1},
{'кожа': 1},
{'кожаный': 2},
{'комфортный': 1},
{'коричневыи': 1},
{'коробка': 1},
{'кошелёк': 1},
{'красивый': 1},
{'красота': 1},
{'крем': 1},
{'круглый': 1},
{'лаик': 1},
{'линза': 1},
{'лицо': 1},
{'материал': 2},
{'мелочеи': 1},
{'металлическии': 1},
{'металлический': 2},
{'мех': 1},
{'моделеи': 1},
{'модель': 1},
{'модный': 1},
{'молниях': 1},
{'мужской': 1},
{'мужчина': 2},
{'накладками': 1},
{'нанесение': 2},
{'наплечныи': 1},
{'наслаждение': 1},
{'натуральный': 1},
{'нежный': 1},
{'новинка': 1},
{'ноутбук': 1},
{'оправа': 1},
{'отделение': 2},
{'отзыв': 2},
{'отзывы': 1},
{'отличнои': 1},
{'очень': 2},
{'очки': 1},
{'пена': 2},
{'плохой': 1},
{'подписываитесь': 1},
{'подтяжка': 1},
{'покупателеи': 1},
{'покупатель': 1},
{'полный': 1},
{'помазок': 1},
{'понравиться': 1},
{'портфель': 1},
{'превращаться': 1},
{'прекрасныи': 1},
{'прекрасный': 1},
{'признателен': 1},
{'продавец': 1},
{'пружинои': 1},
{'рекомендовать': 2},
{'ретро': 1},
{'решение': 1},
{'ручка': 2},
{'сантиметр': 2},
{'сдержанный': 1},
{'сегодня': 1},
{'спандекс': 1},
{'сплава': 1},
{'стекло': 1},
{'стиль': 1},
{'стильный': 1},
{'сумка': 1},
{'темно': 1},
{'тысяча': 1},
{'удобный': 2},
{'удобство': 1},
{'удовольствие': 1},
{'ультрафиолет': 1},
{'упаковать': 2},
{'фотохромный': 1},
{'футляр': 1},
{'хороший': 1},
{'худой': 1},
{'цвет': 1},
{'цветовой': 1},
{'цинк': 1},
{'черныи': 1},
{'ширина': 1},
{'эластичныи': 1}],
[{'покупка': 4},
{'даваите': 1},
{'использовать': 1},
{'посмотреть': 2},
{'цена': 2},
{'интересный': 1},
{'магазин': 2},
{'товар': 5},
{'набор': 2},
{'разный': 1},
{'самое': 1},
{'складный': 1},
{'статья': 1},
{'качество': 1},
{'кожа': 1},
{'коробка': 1},
{'крем': 1},
{'новинка': 7},
{'подписываитесь': 1},
{'цвет': 4},
{'автомобилист': 1},
{'апрель': 4},
{'аромат': 1},
{'ассортимент': 2},
{'банныи': 1},
{'бельё': 1},
{'блокноты': 1},
{'вакуумный': 1},
{'весёлый': 1},
{'волос': 1},
{'гель': 1},
{'гигиена': 1},
{'горшки': 1},
{'губка': 1},
{'дача': 1},
{'двухъярусная': 1},
{'детеи': 1},
{'детский': 2},
{'дизаинами': 1},
{'дизаины': 1},
{'дом': 2},
{'душе': 1},
{'желать': 1},
{'забываите': 1},
{'завезти': 1},
{'завершить': 1},
{'зеркало': 1},
{'зонт': 1},
{'иванов': 1},
{'игрушка': 4},
{'идея': 1},
{'канцелярия': 1},
{'кинетический': 1},
{'клавиатура': 1},
{'компас': 1},
{'конец': 2},
{'конструктор': 1},
{'копилка': 1},
{'корзина': 1},
{'коробочка': 1},
{'косметика': 2},
{'крышкои': 1},
{'лаванда': 1},
{'лаики': 1},
{'летний': 1},
{'магнитик': 1},
{'март': 6},
{'мочалка': 1},
{'мытьё': 1},
{'надувной': 1},
{'наносить': 1},
{'начало': 1},
{'новинками': 1},
{'новый': 1},
{'обзор': 9},
{'отдел': 1},
{'отделе': 1},
{'отдых': 1},
{'отсек': 1},
{'пакет': 1},
{'песок': 1},
{'песочница': 1},
{'подарок': 1},
{'подготовить': 1},
{'подробныи': 1},
{'полезный': 1},
{'полка': 1},
{'полотенце': 2},
{'полочка': 1},
{'постельный': 1},
{'посуда': 3},
{'появиться': 3},
{'предполагать': 1},
{'представить': 2},
{'приятный': 1},
{'проводной': 1},
{'проидемся': 1},
{'производство': 1},
{'пропустить': 1},
{'просмотр': 1},
{'простынь': 1},
{'прямо': 1},
{'пятёрочка': 3},
{'ремешок': 1},
{'роза': 1},
{'рублеи': 14},
{'светодиодныи': 1},
{'сказать': 1},
{'см': 2},
{'снова': 2},
{'сожаление': 1},
{'состав': 1},
{'спасибо': 1},
{'ставить': 1},
{'страничка': 1},
{'сушка': 1},
{'творчество': 1},
{'тело': 1},
{'трость': 1},
{'удачный': 1},
{'указать': 2},
{'уход': 2},
{'хранение': 2},
{'цветок': 1},
{'цифровой': 1},
{'читаите': 1},
{'щётка': 1}],
[{'покупка': 3},
{'деиствительно': 1},
{'дополнительнои': 1},
{'получить': 1},
{'цена': 4},
{'выгодный': 3},
{'купить': 4},
{'магазин': 5},
{'продавать': 1},
{'товар': 2},
{'заказ': 1},
{'интернет': 2},
{'комплект': 2},
{'смочь': 2},
{'покупатель': 1},
{'желать': 1},
{'приятный': 1},
{'рублеи': 2},
{'база': 1},
{'батарейка': 1},
{'быстро': 1},
{'вагин': 6},
{'вагины': 1},
{'вибрациеи': 5},
{'внимание': 1},
{'волосик': 1},
{'вставляться': 1},
{'выгоднои': 1},
{'выносной': 1},
{'джанин': 8},
{'известнои': 1},
{'интим': 1},
{'качественныи': 1},
{'лицензионныи': 1},
{'лобке': 1},
{'любрикант': 1},
{'максимально': 1},
{'название': 1},
{'недорого': 1},
{'описание': 1},
{'особый': 1},
{'отверстие': 1},
{'оформить': 1},
{'пальчиковый': 1},
{'положить': 1},
{'порнозвезды': 1},
{'пульт': 1},
{'работать': 1},
{'светлый': 1},
{'секс': 2},
{'слепок': 1},
{'совершение': 1},
{'стимуляция': 1},
{'тип': 1},
{'уважаемые': 1},
{'яицо': 1}],
[{'планировать': 1},
{'цена': 2},
{'продавать': 4},
{'экземпляр': 1},
{'модель': 1},
{'очень': 3},
{'рублеи': 1},
{'спасибо': 1},
{'акрил': 1},
{'бахроме': 1},
{'белыи': 1},
{'буклированные': 1},
{'вещь': 1},
{'длинныи': 2},
{'достаточно': 1},
{'единственный': 1},
{'изменю': 1},
{'метр': 1},
{'моеи': 1},
{'мягкий': 1},
{'наматываться': 1},
{'нежныи': 1},
{'неузнаваемость': 1},
{'нитка': 2},
{'огромный': 1},
{'оксана': 1},
{'повтор': 1},
{'повторю': 1},
{'пушистый': 1},
{'радуга': 1},
{'руб': 3},
{'сиреневыи': 1},
{'тонкии': 1},
{'фиолетовый': 1},
{'черно': 1},
{'шарф': 2},
{'шею': 1}],
[{'срок': 1},
{'цена': 1},
{'другои': 1},
{'днеи': 1},
{'заказ': 1},
{'оформление': 1},
{'работа': 1},
{'длина': 1},
{'модель': 1},
{'цвет': 3},
{'рублеи': 1},
{'см': 1},
{'нитка': 1},
{'шарф': 1},
{'белый': 1},
{'выполню': 1},
{'двустороннии': 1},
{'двухслоиныи': 1},
{'красный': 1},
{'крючок': 1},
{'молот': 1},
{'надпись': 1},
{'однои': 1},
{'подарить': 1},
{'пряжи': 1},
{'связать': 1},
{'серп': 1},
{'сторона': 1},
{'шерстянои': 1},
{'шерстяной': 1}],
[{'других': 1},
{'хотеть': 2},
{'цена': 2},
{'купить': 2},
{'размер': 1},
{'товар': 4},
{'брать': 1},
{'полностью': 1},
{'сделать': 1},
{'мех': 1},
{'приятный': 1},
{'рублеи': 1},
{'состав': 1},
{'руб': 1},
{'ангора': 1},
{'вопрос': 1},
{'гольф': 1},
{'дело': 1},
{'засунуть': 1},
{'знать': 1},
{'китае': 1},
{'место': 1},
{'меховой': 1},
{'новогодний': 1},
{'носок': 1},
{'ощупь': 1},
{'полиамид': 1},
{'полиэстер': 2},
{'рассчитать': 1},
{'рука': 1},
{'самом': 1},
{'светофор': 4},
{'тёплый': 1},
{'успеть': 1},
{'эластан': 1}]]
flat_list = [item for sublist in big_list for item in sublist]
# Sum counts across all sub-lists with a Counter: a plain dict.update() would
# overwrite a repeated word's count instead of adding it
result = collections.Counter()
for i in flat_list:
    result.update(i)
counter = result.most_common()
print(counter)
dframe = pd.DataFrame(counter, columns=["Word", "Count"])
dframe.to_csv('a12_freq_done.csv')
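# Tiny illustration (not part of the original data) of why a Counter is used
# above: dict.update() keeps only the last value seen for a repeated key,
# while Counter.update() adds the values together.
demo_dict = {}
demo_dict.update({'скидка': 2})
demo_dict.update({'скидка': 4})        # plain dict keeps only the last value: 4
demo_counter = collections.Counter()
demo_counter.update({'скидка': 2})
demo_counter.update({'скидка': 4})     # Counter adds the counts: 2 + 4 = 6
print(demo_dict['скидка'], demo_counter['скидка'])  # prints: 4 6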
| 2.140625
| 2
|
src/api/datamanage/pro/datamodel/views/dmm_model_views.py
|
Chromico/bk-base
| 84
|
12782298
|
<reponame>Chromico/bk-base<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from datamanage.pro.datamodel.dmm.manager import (
CalculationAtomManager,
DataModelManager,
IndicatorManager,
MasterTableManager,
OperationLogManager,
)
from datamanage.pro.datamodel.handlers.constraint import get_field_constraint_tree_list
from datamanage.pro.datamodel.handlers.field_type import get_field_type_configs
from datamanage.pro.datamodel.models.model_dict import (
CalculationAtomType,
InnerField,
TimeField,
)
from datamanage.pro.datamodel.serializers.data_model import (
BkUserNameSerializer,
DataModelCreateSerializer,
DataModelDiffSerializer,
DataModelImportSerializer,
DataModelInfoSerializer,
DataModelListSerializer,
DataModelNameValidateSerializer,
DataModelOverviewSerializer,
DataModelReleaseSerializer,
DataModelUpdateSerializer,
FieldTypeListSerializer,
MasterTableCreateSerializer,
MasterTableListSerializer,
OperationLogListSerializer,
RelatedDimensionModelListSerializer,
ResultTableFieldListSerializer,
)
from datamanage.pro.datamodel.serializers.validators.url_params import convert_to_number
from datamanage.utils.api.meta import MetaApi
from rest_framework.response import Response
from common.auth import check_perm
from common.decorators import detail_route, list_route, params_valid
from common.local import get_request_username
from common.views import APIModelViewSet, APIViewSet
class DataModelViewSet(APIViewSet):
lookup_value_regex = "[0-9]+"
lookup_field = "model_id"
@params_valid(serializer=DataModelCreateSerializer)
def create(self, request, params):
"""
@api {post} /datamanage/datamodel/models/ *创建数据模型
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_create
@apiDescription 创建数据模型
@apiParam {String} model_name 模型名称
@apiParam {String} model_alias 模型别名
@apiParam {String} model_type 模型类型
@apiParam {String} description 模型描述
@apiParam {Int} project_id 项目id
@apiParam {List} tags 标签
@apiParam {String} bk_username 用户名
@apiParamExample {json} 参数样例:
{
"model_name": "fact_item_flow",
"model_alias": "道具流水表",
"model_type": "fact_table",
"description": "道具流水",
"tags": [
{
"tag_code":"common_dimension",
"tag_alias":"公共维度"
},{
"tag_code":"",
"tag_alias":"自定义标签名称"
}
],
"project_id": 4172,
"bk_username": "xx"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"model_id": 1,
"model_name": "fact_item_flow",
"model_alias": "道具流水表",
"model_type": "fact_table",
"description": "道具流水",
"tags": [
{
"alias": "公共维度",
"code": "common_dimension"
},
{
"alias": "测试标签",
"code": "c_tag_1603956645_976235_139710369135088"
}
],
"table_name": "fact_item_flow",
"table_alias": "道具流水表",
"project_id": 4172,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56",
"publish_status":"developing",
"active_status":"active",
"step_id": 1
}
}
"""
bk_username = get_request_username()
check_perm("datamodel.create", params["project_id"])
        # Create the data model
datamodel_dict = DataModelManager.create_data_model(params, bk_username)
return Response(datamodel_dict)
@list_route(methods=["get"], url_path="validate_model_name")
@params_valid(serializer=DataModelNameValidateSerializer)
def validate_model_name(self, request, params):
"""
@api {get} /datamanage/datamodel/models/validate_model_name/ *判断数据模型名称是否存在
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_validate_model_name
@apiDescription 判断数据模型名称是否存在
@apiParam {String} model_name 模型名称
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": true
}
"""
is_model_name_existed = DataModelManager.validate_model_name(params)
return Response(is_model_name_existed)
@params_valid(serializer=DataModelUpdateSerializer)
def update(self, request, model_id, params):
"""
@api {put} /datamanage/datamodel/models/:model_id/ *修改数据模型
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_update
@apiDescription 修改数据模型
@apiParam {String} [model_alias] 模型别名
@apiParam {String} [description] 模型描述
@apiParam {List} [tags] 标签
@apiParam {String} bk_username 用户名
@apiParamExample {json} 参数样例:
{
"model_alias": "道具流水表",
"description": "道具流水",
"tags": [
{
"tag_code":"common_dimension",
"tag_alias":"公共维度"
},{
"tag_code":"",
"tag_alias":"自定义标签名称"
}
],
"bk_username": "xx"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"model_id": 1,
"model_name": "fact_item_flow",
"model_alias": "道具流水表",
"model_type": "fact_table",
"description": "道具流水",
"tags": [
{
"tag_code":"common_dimension",
"tag_alias":"公共维度"
},{
"tag_code":"",
"tag_alias":"自定义标签名称"
}
],
"table_name": "fact_item_flow",
"table_alias": "道具流水表",
"project_id": 4172,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56",
"publish_status":"developing",
"active_status":"active",
"step_id": 1
}
}
"""
model_id = convert_to_number("model_id", model_id)
bk_username = get_request_username()
check_perm("datamodel.update", model_id)
        # Update the data model
datamodel_dict = DataModelManager.update_data_model(model_id, params, bk_username)
return Response(datamodel_dict)
def delete(self, request, model_id):
"""
@api {delete} /datamanage/datamodel/models/:model_id/ *删除数据模型(软删除)
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_delete
@apiDescription 删除数据模型
@apiParam {String} bk_username 用户名
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"model_id": 1,
"model_name": "fact_item_flow",
"model_alias": "道具流水表",
"model_type": "fact_table",
"description": "道具流水",
"tags": [
{
"tag_code":"common_dimension",
"tag_alias":"公共维度"
},{
"tag_code":"",
"tag_alias":"自定义标签名称"
}
],
"table_name": "fact_item_flow",
"table_alias": "道具流水表",
"project_id": 4172,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56",
"publish_status":"developing",
"active_status":"disabled"
}
}
"""
model_id = convert_to_number("model_id", model_id)
bk_username = get_request_username()
check_perm("datamodel.delete", model_id)
        # Soft-delete the data model
datamodel_dict = DataModelManager.delete_data_model(model_id, bk_username)
return Response(datamodel_dict)
@params_valid(serializer=DataModelListSerializer)
def list(self, request, params):
"""
@api {get} /datamanage/datamodel/models/ *数据模型列表
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_list
@apiDescription 数据模型列表
@apiParam {Int} [project_id] 项目id
@apiParam {String} [model_type] 模型类型,事实表模型/维度表模型
@apiParam {Int} [model_id] 模型id
@apiParam {String} [model_name] 模型名称
@apiParam {String} [keyword] 搜索关键字,支持模型名称/模型别名/模型描述/标签名称/标签别名
@apiParam {String} bk_username 用户名
@apiSuccess (返回) {Int} data.model_id 模型ID
@apiSuccess (返回) {String} data.model_name 模型名称
@apiSuccess (返回) {String} data.model_alias 模型别名
@apiSuccess (返回) {String} data.model_type 模型类型,fact_table/dimension_table
@apiSuccess (返回) {String} data.description 模型描述
@apiSuccess (返回) {List} data.tags 标签列表,包含标签名称和标签别名
@apiSuccess (返回) {String} data.table_name 主表名称
@apiSuccess (返回) {String} data.table_alias 主表别名
@apiSuccess (返回) {Int} data.project_id 项目id
@apiSuccess (返回) {String} data.created_by 创建人
@apiSuccess (返回) {String} data.created_at 创建时间
@apiSuccess (返回) {String} data.updated_by 更新人
@apiSuccess (返回) {String} data.updated_at 更新时间
@apiSuccess (返回) {String} data.publish_status 发布状态
@apiSuccess (返回) {String} data.active_status 可用状态
@apiSuccess (返回) {String} data.step_id 模型构建&发布完成步骤
@apiSuccess (返回) {Int} data.applied_count 应用数量
@apiSuccess (返回) {Boolean} data.sticky_on_top 模型是否置顶
@apiSuccess (返回) {Boolean} data.is_instantiated 模型是否被实例化
@apiSuccess (返回) {Boolean} data.is_quoted 模型是否被引用
@apiSuccess (返回) {Boolean} data.is_related 模型是否被关联
@apiSuccess (返回) {Boolean} data.can_be_deleted 模型是否可以被删除
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":{},
"message":"ok",
"code":"1500200",
"result":true,
"data":[
{
"model_id":1,
"model_name":"fact_item_flow",
"model_alias":"道具流水表",
"model_type":"fact_table",
"description":"道具流水",
"tags": [
{
"tag_alias": "登出",
"tag_code": "logout"
}
],
"table_name":"fact_item_flow",
"table_alias":"道具流水表",
"project_id":4172,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56",
"publish_status":"developing",
"active_status":"active",
"step_id": 1,
"applied_count":2,
"sticky_on_top": true,
"is_instantiated": False,
"is_related": False,
"is_quoted": True,
"can_be_deleted": False
}
]
}
"""
datamodel_list = DataModelManager.get_data_model_list(params)
return Response(datamodel_list)
@detail_route(methods=["get"], url_path="dimension_models/can_be_related")
@params_valid(serializer=RelatedDimensionModelListSerializer)
def get_dim_model_list_can_be_related(self, request, model_id, params):
"""
@api {get} /datamanage/datamodel/models/:model_id/dimension_models/can_be_related/ 可以关联的维度模型列表
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_dimension_models_can_be_related
@apiDescription 可以关联的维度模型列表
@apiParam {Int} [model_id] 模型id
@apiParam {Int} [related_model_id] 关联模型id,用于前端点击关联模型设置回填
@apiSuccess (返回) {Int} data.model_id 模型ID
@apiSuccess (返回) {String} data.model_name 模型名称
@apiSuccess (返回) {String} data.model_alias 模型别名
@apiSuccess (返回) {Boolean} data.has_extended_fields 模型下除主键和时间字段以外是否有其他维度字段
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":{},
"message":"ok",
"code":"1500200",
"result":true,
"data":[
{
"model_id":1,
"model_name":"fact_item_flow",
"model_alias":"道具流水表",
"has_extended_fields": True
}
]
}
"""
related_model_id = params["related_model_id"]
published = params["published"]
dmm_model_list = DataModelManager.get_dim_model_list_can_be_related(model_id, related_model_id, published)
return Response(dmm_model_list)
@detail_route(methods=["get"], url_path="info")
@params_valid(serializer=DataModelInfoSerializer)
def info(self, request, model_id, params):
"""
@api {get} /datamanage/datamodel/models/:model_id/info/ *数据模型详情
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_info
@apiDescription 数据模型详情
@apiParam {List} [with_details] 展示模型主表字段、模型关联关系、统计口径、指标等详情,
取值['release_info', 'master_table', 'calculation_atoms', 'indicators']
@apiSuccess (返回) {Int} data.model_id 模型ID
@apiSuccess (返回) {String} data.model_name 模型名称
@apiSuccess (返回) {String} data.model_alias 模型别名
@apiSuccess (返回) {String} data.table_name 主表名称
@apiSuccess (返回) {String} data.table_alias 主表别名
@apiSuccess (返回) {String} data.model_type 模型类型:fact_table/dimension_table
@apiSuccess (返回) {String} data.description 模型描述
@apiSuccess (返回) {List} data.tags 标签,例如[{"tag_alias": "道具", "tag_code": "props"}]
@apiSuccess (返回) {Int} data.project_id 项目id
@apiSuccess (返回) {String} data.active_status 可用状态:developing/published/re-developing
@apiSuccess (返回) {String} data.publish_status 发布状态:active/disabled/conflicting
@apiSuccess (返回) {Int} data.step_id 模型构建&发布完成步骤
@apiSuccess (返回) {Int} data.applied_count 应用数量
@apiSuccess (返回) {String} data.created_by 创建人
@apiSuccess (返回) {String} data.created_at 创建时间
@apiSuccess (返回) {String} data.updated_by 更新人
@apiSuccess (返回) {String} data.updated_at 更新时间
@apiSuccess (返回) {String} data.version_log 发布描述
@apiSuccess (返回) {String} data.release_created_by 发布者
@apiSuccess (返回) {String} data.release_created_at 发布时间
@apiSuccess (返回) {Json} data.model_detail 模型主表、统计口径和指标等详情,当with_details非空时展示
@apiSuccess (返回) {List} data.model_detail.fields 主表字段列表
@apiSuccess (返回) {Int} data.model_detail.fields.id 字段ID
@apiSuccess (返回) {Int} data.model_detail.fields.model_id 模型ID
@apiSuccess (返回) {String} data.model_detail.fields.field_name 字段名称
@apiSuccess (返回) {String} data.model_detail.fields.field_alias 字段别名
@apiSuccess (返回) {Int} data.model_detail.fields.field_index 字段位置
@apiSuccess (返回) {String} data.model_detail.fields.field_type 数据类型
@apiSuccess (返回) {String} data.model_detail.fields.field_category 字段类型:measure/dimension
@apiSuccess (返回) {String} data.model_detail.fields.is_primary_key 是否主键:True/False
@apiSuccess (返回) {String} data.model_detail.fields.description 字段描述
@apiSuccess (返回) {List} data.model_detail.fields.field_constraint_content 字段约束内容,例如:
{
"op": "OR",
"groups": [
{
"op": "AND",
"items": [
{"constraint_id": "", "constraint_content": ""},
{"constraint_id": "", "constraint_content": ""}
]
},
{
"op": "OR",
"items": [
{"constraint_id": "", "constraint_content": ""},
{"constraint_id": "", "constraint_content": ""}
]
}
]
}
@apiSuccess (返回) {Json} data.model_detail.fields.field_clean_content 清洗规则,例如:
{
"clean_option":"SQL",
"clean_content":"price * 100 as price"
}
@apiSuccess (返回) {List} data.model_detail.fields.origin_fields 计算来源字段,例如['price']
@apiSuccess (返回) {Int} data.model_detail.fields.source_model_id 拓展字段来源模型id
@apiSuccess (返回) {String} data.model_detail.fields.source_field_name 拓展字段来源模型字段
@apiSuccess (返回) {Boolean} data.model_detail.fields.is_join_field 是否主表关联字段
@apiSuccess (返回) {Boolean} data.model_detail.fields.is_extended_field 是否扩展字段
@apiSuccess (返回) {String} data.model_detail.fields.join_field_name 扩展字段对应的主表关联字段
@apiSuccess (返回) {List} data.model_detail.model_relation 主表关联关系
@apiSuccess (返回) {Int} data.model_detail.model_relation.model_id 主表模型ID
@apiSuccess (返回) {String} data.model_detail.model_relation.field_name 主表关联字段
@apiSuccess (返回) {Int} data.model_detail.model_relation.related_model_id 关联维度模型ID
@apiSuccess (返回) {String} data.model_detail.model_relation.related_field_name 关联维度模型关联字段
@apiSuccess (返回) {String} data.model_detail.model_relation.related_method 关联维度模型关联方法
@apiSuccess (返回) {Int} data.model_detail.calculation_atoms 统计口径列表
@apiSuccess (返回) {Int} data.model_detail.calculation_atoms.model_id 创建统计口径的模型ID
@apiSuccess (返回) {Int} data.model_detail.calculation_atoms.project_id 创建统计口径的项目ID
@apiSuccess (返回) {String} data.model_detail.calculation_atoms.calculation_atom_name 统计口径名称
@apiSuccess (返回) {String} data.model_detail.calculation_atoms.calculation_atom_alias 统计口径中文名
@apiSuccess (返回) {String} data.model_detail.calculation_atoms.calculation_atom_type 统计口径类型:create/quote
@apiSuccess (返回) {String} data.model_detail.calculation_atoms.description 统计口径描述
@apiSuccess (返回) {String} data.model_detail.calculation_atoms.field_type 统计口径字段类型
@apiSuccess (返回) {String} data.model_detail.calculation_atoms.calculation_content 统计方式,例如
表单提交示例:
{
'option': 'TABLE',
'content': {
'calculation_field': 'price',
'calculation_function': 'sum'
}
}
SQL提交示例:
{
'option': 'SQL',
'content': {
'calculation_formula': 'sum(price)'
}
}
@apiSuccess (返回) {String} data.model_detail.calculation_atoms.calculation_formula 统计SQL
@apiSuccess (返回) {String} data.model_detail.calculation_atoms.origin_fields 统计口径计算来源字段
@apiSuccess (返回) {Boolean} data.model_detail.calculation_atoms.editable 统计口径能否编辑
@apiSuccess (返回) {Boolean} data.model_detail.calculation_atoms.deletable 统计口径能否删除
@apiSuccess (返回) {List} data.model_detail.indicators 指标列表
@apiSuccess (返回) {String} data.model_detail.indicators.indicator_name 指标名称
@apiSuccess (返回) {String} data.model_detail.indicators.indicator_alias 指标中文名
@apiSuccess (返回) {String} data.model_detail.indicators.description 指标描述
@apiSuccess (返回) {String} data.model_detail.indicators.calculation_atom_name 指标统计口径
@apiSuccess (返回) {List} data.model_detail.indicators.aggregation_fields 指标聚合字段,例如['channel_name']
@apiSuccess (返回) {List} data.model_detail.indicators.aggregation_fields_alias 指标聚合字段中文名,['大区名称']
@apiSuccess (返回) {String} data.model_detail.indicators.filter_formula 指标过滤条件
@apiSuccess (返回) {String} data.model_detail.indicators.scheduling_type 指标调度类型:stream/batch
@apiSuccess (返回) {Json} data.model_detail.indicators.scheduling_content 指标调度内容,详见dataflow文档
离线参数示例:
{
"window_type": "fixed",
"count_freq": 1,
"schedule_period": "day",
"fixed_delay": 0,
"dependency_config_type": "unified",
"unified_config":{
"window_size": 1,
"window_size_period": "day",
"dependency_rule": "all_finished"
},
"advanced":{
"recovery_times":3,
"recovery_enable":false,
"recovery_interval":"60m"
}
}
实时参数示例:
{
"window_type":"scroll",
"window_lateness":{
"allowed_lateness":true,
"lateness_count_freq":60,
"lateness_time":6
},
"window_time":1440,
"count_freq":30,
"waiting_time":0
}
@apiSuccess (返回) {String} data.model_detail.indicators.parent_indicator_name 父指标名称
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":{},
"message":"ok",
"code":"1500200",
"result":true,
"data":{
"model_id":1,
"model_name":"fact_item_flow",
"model_alias":"道具流水表",
"table_name":"fact_item_flow",
"table_alias":"道具流水表",
"model_type":"fact_table",
"description":"道具流水",
"tags":[
{
"tag_code":"common_dimension",
"tag_alias":"公共维度"
},{
"tag_code":"",
"tag_alias":"自定义标签名称"
}
],
"project_id":3,
"active_status":"active",
"publish_status":"developing",
"step_id": 1,
"applied_count":2,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56",
"model_detail":{
"fields":[
{
"id":1,
"field_name":"price",
"field_alias":"道具价格",
"field_index":1,
"field_type":"long",
"field_category":"metric",
"description":"道具价格",
"field_constraint_content":[
{"content": {"constraint_id": "gt", "constraint_content": "0"}}
],
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"price * 100 as price"
},
"origin_fields":[
"price"
],
"source_model_id":null,
"source_field_name":null,
"is_join_field": false,
"is_extended_field": false,
"join_field_name": null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
],
"model_relation":[
{
"model_id":1,
"field_name":"channel_id",
"related_model_id":2,
"related_field_name":"channel_id",
"related_method":"left-join"
}
],
"calculation_atoms":[
{
"model_id":1,
"project_id":3,
"calculation_atom_name":"item_sales_amt",
"calculation_atom_alias":"item_sales_amt",
"calculation_atom_type":"create",
"description":"item_sales_amt",
"field_type":"long",
"calculation_content":{
"option":"TABLE",
"content":{
"calculation_field":"price",
"calculation_function":"sum"
}
},
"calculation_formula":"sum(price)",
"origin_fields":[
"price"
],
"editable":true,
"deletable":false,
"quoted_count":0,
"indicator_count":1,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
],
"indicators":[
{
"model_id":1,
"project_id":3,
"indicator_name":"item_sales_amt_china_1d",
"indicator_alias":"国内每天按大区统计道具销售额",
"description":"国内每天按大区统计道具销售额",
"calculation_atom_name":"item_sales_amt",
"aggregation_fields":[
"channel_name"
],
"aggregation_fields_alias":[
"渠道号"
],
"filter_formula":"os='android'",
"scheduling_content":{
"window_type":"scroll",
"window_lateness":{
"allowed_lateness":true,
"lateness_count_freq":60,
"lateness_time":6
},
"window_time":1440,
"count_freq":30,
"waiting_time":0
},
"parent_indicator_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
]
}
}
}
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
datamodel_dict = DataModelManager.get_data_model_info(model_id, params)
return Response(datamodel_dict)
@detail_route(methods=["get"], url_path="latest_version/info")
@params_valid(serializer=DataModelInfoSerializer)
def latest_version_info(self, request, model_id, params):
"""
@api {get} /datamanage/datamodel/models/:model_id/latest_version/info/ *数据模型最新发布版本详情
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_latest_version_info
@apiDescription 数据模型最新发布版本详情
@apiParam {List} [with_details] 展示统计口径 & 指标在草稿态中是否存在, 取值['existed_in_stage']
@apiSuccess (返回) {Int} data.model_id 模型ID
@apiSuccess (返回) {String} data.model_name 模型名称
@apiSuccess (返回) {String} data.model_alias 模型别名
@apiSuccess (返回) {String} data.table_name 主表名称
@apiSuccess (返回) {String} data.table_alias 主表别名
@apiSuccess (返回) {String} data.model_type 模型类型:fact_table/dimension_table
@apiSuccess (返回) {String} data.description 模型描述
@apiSuccess (返回) {List} data.tags 标签,例如[{"tag_alias": "道具", "tag_code": "props"}]
@apiSuccess (返回) {Int} data.project_id 项目id
@apiSuccess (返回) {String} data.active_status 可用状态:developing/published/re-developing
@apiSuccess (返回) {String} data.publish_status 发布状态:active/disabled/conflicting
@apiSuccess (返回) {Int} data.step_id 模型构建&发布完成步骤
@apiSuccess (返回) {Int} data.applied_count 应用数量
@apiSuccess (返回) {String} data.created_by 创建人
@apiSuccess (返回) {String} data.created_at 创建时间
@apiSuccess (返回) {String} data.updated_by 更新人
@apiSuccess (返回) {String} data.updated_at 更新时间
@apiSuccess (返回) {String} data.version_log 发布描述
@apiSuccess (返回) {String} data.release_created_by 发布者
@apiSuccess (返回) {String} data.release_created_at 发布时间
@apiSuccess (返回) {Json} data.model_detail 模型主表、统计口径和指标等详情
@apiSuccess (返回) {List} data.model_detail.fields 主表字段列表
@apiSuccess (返回) {List} data.model_detail.model_relation 主表关联关系
@apiSuccess (返回) {Int} data.model_detail.calculation_atoms 统计口径列表
@apiSuccess (返回) {List} data.model_detail.indicators 指标列表
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
datamodel_dict = DataModelManager.get_data_model_latest_version_info(model_id, params["with_details"])
return Response(datamodel_dict)
@detail_route(methods=["post"], url_path="release")
@params_valid(serializer=DataModelReleaseSerializer)
def release(self, request, model_id, params):
"""
@api {post} /datamanage/datamodel/models/:model_id/release/ 数据模型发布
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_release
@apiDescription 数据模型发布
@apiParam {String} version_log 发布描述
@apiParamExample {json} 参数样例:
{
"version_log": "道具流水表发布"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": true
}
"""
bk_username = get_request_username()
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.update", model_id)
datamodel_release_dict = DataModelManager.release_data_model(model_id, params["version_log"], bk_username)
return Response(datamodel_release_dict)
@detail_route(methods=["get"], url_path="release_list")
def release_list(self, request, model_id):
"""
@api {get} /datamanage/datamodel/models/:model_id/release_list/ 数据模型发布列表
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_release_list
@apiDescription 数据模型发布列表
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"results": [
{
"created_at": "2020-11-26 00:28:32",
"version_log": "道具流水模型发布1.0.0",
"created_by": "admin"
"version_id": "xxxxx"
}
]
}
}
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
datamodel_release_list = DataModelManager.get_data_model_release_list(model_id)
return Response({"results": datamodel_release_list})
@detail_route(methods=["get"], url_path="overview")
@params_valid(serializer=DataModelOverviewSerializer)
def overview(self, request, model_id, params):
"""
@api {get} /datamanage/datamodel/models/:model_id/overview/ *数据模型预览
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_overview
@apiDescription 数据模型预览,用于模型预览树形结构展示
@apiParam {Boolean} [latest_version] 是否返回模型最新发布版本预览信息
@apiSuccess (返回) {Int} data.nodes 节点,包括维表、主表、统计口径、指标,不同类型用node_type区分
@apiSuccess (返回) {String} data.lines 边
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":{},
"message":"ok",
"code":"1500200",
"result":true,
"data":{
"nodes":[
{
"node_type":"fact_table",
"node_id":"fact_table-fact_item_flow",
"model_id":1,
"model_name":"fact_item_flow",
"model_alias":"道具流水表",
"model_type":"fact_table",
"description":"道具流水",
"tags":[
{
"tag_code":"common_dimension",
"tag_alias":"公共维度"
},{
"tag_code":"",
"tag_alias":"自定义标签名称"
}
],
"table_name":"fact_item_flow",
"table_alias":"道具流水表",
"project_id":4172,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56",
"publish_status":"developing",
"active_status":"active",
"applied_count":2,
"model_detail":{
"fields":[
{
"field_id":1,
"field_name":"price",
"field_alias":"道具价格",
"field_index":1,
"field_type":"long",
"field_category":"metric",
"description":"道具价格",
"field_constraint_content":null,
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"price * 100 as price"
},
"source_model_id":null,
"source_field_name":null,
"is_join_field":false,
"is_extended_field":false,
"join_field_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
},
{
"field_id":2,
"field_name":"channel_id",
"field_alias":"渠道号",
"field_index":2,
"field_type":"string",
"field_category":"dimension",
"description":"渠道号",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null,
"is_join_field":true,
"is_extended_field":false,
"join_field_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
]
}
},
{
"node_type":"dimension_table",
"node_id":"dimension_table-dm_channel",
"model_id":2,
"model_name":"dm_channel",
"model_alias":"渠道表",
"model_type":"dimension_table",
"description":"渠道表",
"tags":[
{
"tag_code":"common_dimension",
"tag_alias":"公共维度"
},{
"tag_code":"",
"tag_alias":"自定义标签名称"
}
],
"table_name":"dm_channel",
"table_alias":" 渠道表",
"project_id":4172,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56",
"publish_status":"developing",
"active_status":"active",
"applied_count":2,
"fields":[
{
"field_id":3,
"field_name":"channel_id",
"field_alias":"渠道号",
"field_index":1,
"field_type":"string",
"field_category":"dimension",
"description":"渠道号",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null,
"is_join_field":false,
"is_extended_field":false,
"join_field_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
]
},
{
"node_type":"calculation_atom",
"node_id":"calculation_atom-item_sales_amt",
"calculation_atom_name":"item_sales_amt",
"calculation_atom_alias":"item_sales_amt",
"description":"item_sales_amt",
"field_type":"long",
"calculation_content":{
"option":"table",
"content":{
"calculation_field":"price",
"calculation_function":"sum"
}
},
"calculation_formula":"sum(price)",
"indicator_count":2,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
},
{
"node_type":"indicator",
"node_id":"indicator-item_sales_amt",
"model_id":1,
"indicator_name":"item_sales_amt_china_1d",
"indicator_alias":"国内每天按大区统计道具销售额",
"description":"国内每天按大区统计道具销售额",
"calculation_atom_name":"item_sales_amt",
"aggregation_fields":[
"channel_name"
],
"filter_formula":"os='android'",
"scheduling_content":{},
"parent_indicator_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
],
"lines":[
{
"from":"dimension_table-dm_channel",
"to":"fact_table_fact-item_flow",
"from_field_name": "channel_id",
"to_field_name": "channel_id",
},
{
"from":"fact_table_fact-item_flow",
"to":"calculation_atom-item_sales_amt"
},
{
"from":"calculation_atom-item_sales_amt",
"to":"indicator-item_sales_amt"
}
]
}
}
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
datamodel_overview_dict = DataModelManager.get_data_model_overview_info(
model_id, latest_version=params["latest_version"]
)
return Response(datamodel_overview_dict)
@detail_route(methods=["get"], url_path="diff")
@params_valid(serializer=DataModelDiffSerializer)
def diff(self, request, model_id, params):
"""
@api {get} /datamanage/datamodel/models/:model_id/diff/ 数据模型变更内容
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_diff
@apiDescription 数据模型变更内容, 用模型当前内容和dmm_model_release的latest版本的model_content作diff
@apiParam {String} [orig_version_id] 源版本ID
@apiParam {String} [new_version_id] 目标版本ID
@apiSuccess (返回) {Json} data.orig_contents 源版本
@apiSuccess (返回) {Json} data.new_content 当前版本
@apiSuccess (返回) {Json} data.diff 变更内容
@apiSuccess (返回) {Json} data.diff.diff_result 变更结论
@apiSuccess (返回) {Int} data.diff.diff_result.create 新增数目
@apiSuccess (返回) {Int} data.diff.diff_result.update 变更数目
@apiSuccess (返回) {Int} data.diff.diff_result.delete 删除数目
@apiSuccess (返回) {Int} data.diff.diff_result.field 字段变更结论
@apiSuccess (返回) {Int} data.diff.diff_result.field.create 字段新增数目
@apiSuccess (返回) {Int} data.diff.diff_result.field.update 字段变更数目
@apiSuccess (返回) {Int} data.diff.diff_result.field.delete 字段删除数目
@apiSuccess (返回) {Int} data.diff.diff_result.field.field_index_update 字段顺序变更数目
@apiSuccess (返回) {List} data.diff.diff_objects 变更对象
@apiSuccess (返回) {String} data.diff.diff_objects.object_type 对象类型
@apiSuccess (返回) {String} data.diff.diff_objects.object_id 对象ID
@apiSuccess (返回) {String} data.diff.diff_objects.diff_type 对象变更类型
@apiSuccess (返回) {List} [data.diff.diff_objects.diff_keys] 变更内容对应的keys
@apiSuccess (返回) {List} [data.diff.diff_objects.diff_objects] 变更字段列表
@apiSuccess (返回) {String} data.diff.diff_objects.diff_objects.object_type 对象类型
@apiSuccess (返回) {String} data.diff.diff_objects.diff_objects.object_id 对象ID
@apiSuccess (返回) {String} data.diff.diff_objects.diff_objects.diff_type 对象变更类型
@apiSuccess (返回) {List} [data.diff.diff_objects.diff_objects.diff_keys] 变更内容对应的keys
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"orig_contents": {
"created_at": "admin",
"created_by": "2020-12-11 15:41:28",
"objects": [
{
"object_type": "master_table",
"object_id": "fact_table-fact_item_flow",
"fields": [
{
"object_type": "field",
"object_id": "field-price",
"field_name":"price",
"field_alias":"道具价格",
"field_index":1,
"field_type":"long",
"field_category":"metric",
"description":"道具价格",
"field_constraint_content":null,
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"price * 100 as price"
},
"source_model_id":null,
"source_field_name":null,
"is_join_field":false,
"is_extended_field":false,
"join_field_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
]
},
{
"object_type": "calculation_atom",
"object_id": "calculation_atom-item_sales_amt",
"calculation_atom_name":"item_sales_amt",
"calculation_atom_alias":"item_sales_amt",
"description":"item_sales_amt",
"field_type":"long",
"calculation_content":{
"option":"table",
"content":{
"calculation_field":"price",
"calculation_function":"sum"
}
},
"calculation_formula":"sum(price)",
"indicator_count":2,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
]
},
"new_contents": {
"created_at": "admin",
"created_by": "2020-12-11 15:41:28",
"objects": [
{
"object_type": "master_table",
"object_id": "fact_table-fact_item_flow",
"fields": [
{
"object_type": "field",
"object_id": "field-price",
"field_name":"price",
"field_alias":"道具价格",
"field_index":1,
"field_type":"long",
"field_category":"metric",
"description":"道具价格1",
"field_constraint_content":null,
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"price * 100 as price"
},
"source_model_id":null,
"source_field_name":null,
"is_join_field":false,
"is_extended_field":false,
"join_field_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
]
},
{
"object_type": "indicator",
"object_id": "indicator-item_sales_amt",
"indicator_name":"item_sales_amt_china_1d",
"indicator_alias":"国内每天按大区统计道具销售额",
"description":"国内每天按大区统计道具销售额",
"calculation_atom_name":"item_sales_amt",
"aggregation_fields":[
"channel_name"
],
"filter_formula":"os='android'",
"scheduling_content":{
"window_type":"fixed",
"count_freq":1,
"schedule_period":"day",
"fixed_delay":0,
"fallback_window":1
},
"parent_indicator_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
]
},
"diff": {
"diff_result": {
"create": 1,
"update": 1,
"delete": 1,
"field": {
"field_index_update": 2,
"create": 1,
"update": 2,
"delete": 0
}
},
"diff_objects": [
{
"object_type": "master_table",
"object_id": "master_table-fact_item_flow",
"diff_type": "update",
"diff_objects":[
{
"object_type": "field",
"object_id": "field-price",
"diff_type": "update",
"diff_keys": ["description"]
}
]
},
{
"object_type": "calculation_atom",
"object_id": "calculation_atom-item_sales_amt",
"diff_type": "delete",
"diff_keys": ["description"]
},
{
"object_type": "indicator",
"object_id": "indicator-item_sales_amt",
"diff_type": "create",
"diff_keys": ["description"]
}
]
}
}
}
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
orig_version_id = params["orig_version_id"]
new_version_id = params["new_version_id"]
# 模型两个指定版本间diff
if orig_version_id or new_version_id:
diff_dict = DataModelManager.diff_data_model_version_content(model_id, orig_version_id, new_version_id)
# 模型上一个发布版本内容 和 当前内容diff
else:
diff_dict = DataModelManager.diff_data_model(model_id)
return Response(diff_dict)
@detail_route(methods=["get"], url_path="export")
def export_datamodel(self, request, model_id):
"""
@api {get} /datamanage/datamodel/models/:model_id/export/ 导出模型
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_export
@apiDescription 导出模型
@apiSuccess (返回) {Int} data.model_id 模型ID
@apiSuccess (返回) {Int} data.project_id 项目id
@apiSuccess (返回) {String} data.model_name 模型名称
@apiSuccess (返回) {String} data.model_alias 模型别名
@apiSuccess (返回) {String} data.model_type 模型类型
@apiSuccess (返回) {String} data.description 模型描述
@apiSuccess (返回) {String} data.table_name 主表名称
@apiSuccess (返回) {String} data.table_alias 主表别名
@apiSuccess (返回) {String} data.publish_status 发布状态
@apiSuccess (返回) {String} data.active_status 可用状态
@apiSuccess (返回) {List} data.tags 标签
@apiSuccess (返回) {String} data.created_by 创建人
@apiSuccess (返回) {String} data.created_at 创建时间
@apiSuccess (返回) {String} data.updated_by 更新人
@apiSuccess (返回) {String} data.updated_at 更新时间
@apiSuccess (返回) {Int} data.applied_count 应用数量
@apiSuccess (返回) {Json} data.model_detail 模型主表、统计口径等详情
@apiSuccess (返回) {Json} data.model_detail.fields 主表字段信息
@apiSuccess (返回) {Json} data.model_detail.model_relation 模型关联关系
@apiSuccess (返回) {Json} data.model_detail.calculation_atoms 模型统计口径
@apiSuccess (返回) {Json} data.model_detail.indicators 模型指标
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":{},
"message":"ok",
"code":"1500200",
"result":true,
"data":{
"model_id":1,
"model_name":"fact_item_flow",
"model_alias":"道具流水表",
"model_type":"fact_table",
"description":"道具流水",
"tags":[
{
"tag_code":"common_dimension",
"tag_alias":"公共维度"
},{
"tag_code":"",
"tag_alias":"自定义标签名称"
}
],
"table_name":"fact_item_flow",
"table_alias":"道具流水表",
"project_id":4172,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56",
"publish_status":"developing",
"active_status":"active",
"applied_count":2,
"model_detail":{
"fields":[
{
"field_id":1,
"field_name":"price",
"field_alias":"道具价格",
"field_index":1,
"field_type":"long",
"field_category":"metric",
"description":"道具价格",
"field_constraint_content":null,
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"price * 100 as price"
},
"source_model_id":null,
"source_field_name":null,
"is_join_field": false,
"is_extended_field": false,
"join_field_name": null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
},
{
"field_id":2,
"field_name":"channel_id",
"field_alias":"渠道号",
"field_index":2,
"field_type":"string",
"field_category":"dimension",
"description":"渠道号",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null,
"is_join_field": true,
"is_extended_field": false,
"join_field_name": null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
],
"model_relation":[
{
"model_id":1,
"field_name":"channel_id",
"related_model_id":2,
"related_field_name":"channel_id",
"related_method":"left-join"
}
],
"calculation_atoms":[
{
"calculation_atom_name":"item_sales_amt",
"calculation_atom_alias":"item_sales_amt",
"description":"item_sales_amt",
"field_type":"long",
"calculation_content":{
"option":"table",
"content":{
"calculation_field":"price",
"calculation_function":"sum"
}
},
"calculation_formula":"sum(price)",
"indicator_count":2,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
],
"indicators":[
{
"model_id":1,
"indicator_name":"item_sales_amt_china_1d",
"indicator_alias":"国内每天按大区统计道具销售额",
"description":"国内每天按大区统计道具销售额",
"calculation_atom_name":"item_sales_amt",
"aggregation_fields":[
"channel_name"
],
"filter_formula":"os='android'",
"scheduling_content":{},
"parent_indicator_name":null,
"created_by":"admin",
"created_at":"2020-10-18 15:38:56",
"updated_by":"admin",
"updated_at":"2020-10-18 15:38:56"
}
]
}
}
}
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
datamodel_dict = DataModelManager.get_data_model_info(
model_id,
{"with_details": ["master_table", "calculation_atoms", "indicators"]},
)
return Response(datamodel_dict)
@list_route(methods=["post"], url_path="import")
@params_valid(serializer=DataModelImportSerializer)
def import_datamodel(self, request, params):
"""
@api {post} /datamanage/datamodel/models/import/ 导入模型
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_import
@apiDescription 导入模型
@apiParamExample {json} 参数样例:
{
"model_name":"fact_item_flow_import",
"project_id":3,
"model_alias":"道具流水表",
"model_type":"fact_table",
"description":"模型描述",
"tags":[
{
"tag_code":"props",
"tag_alias":"道具"
}
],
"model_detail":{
"indicators":[
{
"indicator_name":"sum_props_price_180s",
"indicator_alias":"指标中文名",
"description":"指标描述",
"calculation_atom_name":"sum_props_price",
"aggregation_fields":[
],
"filter_formula":"-- WHERE 之后的语句",
"scheduling_type":"stream",
"scheduling_content":{
"window_type":"scroll",
"count_freq":180,
"format_window_size":180,
"window_lateness":{
"allowed_lateness":false,
"lateness_count_freq":60,
"lateness_time":1
},
"window_time":1440,
"expired_time":0,
"format_window_size_unit":"s",
"session_gap":0,
"waiting_time":0
}
}
],
"fields":[
{
"field_name":"id",
"field_alias":"主键id",
"field_index":1,
"field_type":"int",
"field_category":"dimension",
"is_primary_key":false,
"description":"主键id",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null
},
{
"field_name":"price",
"field_alias":"渠道xxx",
"field_index":2,
"field_type":"int",
"field_category":"measure",
"is_primary_key":false,
"description":"价格",
"field_constraint_content":{
"groups":[
{
"items":[
{
"constraint_id":"gte",
"constraint_content":"0"
},
{
"constraint_id":"gte",
"constraint_content":"100"
}
],
"op":"AND"
},
{
"items":[
{
"constraint_id":"not_null",
"constraint_content":null
}
],
"op":"AND"
}
],
"op":"AND"
},
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"price as price"
},
"source_model_id":null,
"source_field_name":null
},
{
"field_name":"channel_id",
"field_alias":"渠道号",
"field_index":12,
"field_type":"string",
"field_category":"dimension",
"is_primary_key":false,
"description":"渠道号",
"field_constraint_content":{
"groups":[
{
"items":[
{
"constraint_id":"not_null",
"constraint_content":null
}
],
"op":"AND"
}
],
"op":"AND"
},
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"channel_id as channel_id"
},
"source_model_id":null,
"source_field_name":null
},
{
"field_name":"channel_description",
"field_alias":"渠道描述",
"field_index":4,
"field_type":"string",
"field_category":"dimension",
"is_primary_key":false,
"description":"渠道描述",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":67,
"source_field_name":"channel_description"
},
{
"field_name":"__time__",
"field_alias":"时间字段",
"field_index":5,
"field_type":"timestamp",
"field_category":"dimension",
"is_primary_key":false,
"description":"平台内置时间字段,数据入库后将装换为可查询字段,比如 dtEventTime/dtEventTimeStamp/localtime",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null
}
],
"model_relation":[
{
"related_method":"left-join",
"related_model_id":67,
"field_name":"channel_id",
"related_field_name":"id"
}
],
"calculation_atoms":[
{
"calculation_atom_name":"sum_props_price_calc",
"calculation_atom_alias":"统计口径中文名",
"description":"统计口径描述",
"field_type":"long",
"calculation_content":{
"content":{
"calculation_formula":"sum(price)+1+1+1"
},
"option":"SQL"
}
}
]
}
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":{},
"message":"ok",
"code":"1500200",
"result":true,
"data":{
"model_id": 23
}
}
"""
bk_username = get_request_username()
check_perm("datamodel.create", params["project_id"])
# 创建数据模型
datamodel_dict = DataModelManager.create_data_model(params, bk_username)
# 判断是否有模型的修改权限
check_perm("datamodel.update", datamodel_dict["model_id"])
# 创建主表
MasterTableManager.update_master_table(datamodel_dict["model_id"], params["model_detail"], bk_username)
# 创建统计口径
for calc_atom_dict in params["model_detail"]["calculation_atoms"]:
calc_atom_dict["model_id"] = datamodel_dict["model_id"]
if calc_atom_dict.get("calculation_atom_type", None) == CalculationAtomType.QUOTE:
CalculationAtomManager.quote_calculation_atoms(
{
"model_id": datamodel_dict["model_id"],
"calculation_atom_names": [calc_atom_dict["calculation_atom_name"]],
},
bk_username,
)
else:
CalculationAtomManager.create_calculation_atom(calc_atom_dict, bk_username)
# 创建指标
for indicator_dict in params["model_detail"]["indicators"]:
indicator_dict["model_id"] = datamodel_dict["model_id"]
IndicatorManager.create_indicator(indicator_dict, bk_username)
return Response({"model_id": datamodel_dict["model_id"]})
@detail_route(methods=["get"], url_path="operators")
def operator_list(self, request, model_id):
"""
@api {get} /datamanage/datamodel/models/:model_id/operators/ 操作者列表
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_operators
@apiDescription 模型操作者列表
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"results": [
"admin1",
"admin2"
]
}
}
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
return Response(OperationLogManager.get_operator_list(model_id))
@detail_route(methods=["post"], url_path="operation_log")
@params_valid(serializer=OperationLogListSerializer)
def operation_log(self, request, model_id, params):
"""
@api {post} /datamanage/datamodel/models/:model_id/operation_log/ 操作记录
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_operation_log
@apiDescription 数据模型操作记录
@apiParam {Json} [conditions] 搜索条件参数,object_operation操作类型,object_type操作对象类型,created_by操作者,object
操作对象, 模糊搜索query
@apiParam {String} [start_time] 启始时间
@apiParam {String} [end_time] 终止时间
@apiParam {Int} page 页码
@apiParam {Int} page_size 每页条数
@apiParam {String} [order_by_created_at] 按照操作时间排序 desc/asc
@apiParam {String} bk_username 用户名
@apiSuccess (返回) {Int} data.count 模型ID
@apiSuccess (返回) {List} data.results 操作记录列表
@apiSuccess (返回) {String} data.results.object_operation 操作类型
@apiSuccess (返回) {String} data.results.object_type 操作对象类别
@apiSuccess (返回) {String} data.results.object_name 操作对象中文名
@apiSuccess (返回) {String} data.results.object_alias 操作对象英文名
@apiSuccess (返回) {String} data.results.description 描述
@apiSuccess (返回) {String} data.results.created_by 操作者
@apiSuccess (返回) {String} data.results.created_at 操作时间
@apiSuccess (返回) {String} data.results.id 操作id
@apiSuccess (返回) {String} data.results.object_id 操作对象id
@apiParamExample {json} 参数样例:
{
"conditions": [
{"key":"object_operation","value":["create"]},
{"key":"object_type","value":["model"]},
{"key":"created_by","value":["admin"]},
{"key":"query","value":["每日道具销售额"]},
],
"page": 1,
"page_size": 10,
"start_time":"2021-01-06 00:00:45",
"end_time":"2021-01-06 20:57:45",
"bk_username": admin
}
@apiParamExample {json} conditions内容:
{
"object_operation": {
"create": "新增",
"update": "变更",
"delete": "删除",
"release": "发布"
},
"object_type": {
"model": "数据模型",
"master_table": "主表",
"calculation_atom": "统计口径",
"indicator": "指标"
}
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"count": 10,
"results": [
{
"object_operation":"update",
"created_at":"2020-12-04 21:34:12",
"object_type":"master_table",
"object_id":"23",
"object_alias":"道具流水表",
"object_name":"fact_item_flow_15",
"created_by":"xx",
"id":10,
"description": null
}
]
}
}
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
return Response(OperationLogManager.get_operation_log_list(model_id, params))
@detail_route(methods=["get"], url_path=r"operation_log/(?P<operation_id>\w+)/diff")
@params_valid(serializer=DataModelInfoSerializer)
def operation_log_diff(self, request, model_id, operation_id, params):
"""
@api {post} /datamanage/datamodel/models/:model_id/operation_log/:operation_id/diff 数据模型操作前后diff
@apiVersion 3.5.0
@apiGroup DataModel_OperationLog
@apiName datamodel_operation_log_diff
@apiDescription 数据模型操作记录diff
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"diff": {
"diff_objects": [
{
"diff_type": "update",
"object_type": "calculation_atom",
"diff_keys": [
"calculation_content.content.calculation_formula",
"calculation_formula"
],
"object_id": "calculation_atom-item_sales_test5"
}
]
},
"new_contents": {
"created_at": "2020-12-04 22:08:57",
"objects": [],
"created_by": "admin"
},
"orig_contents": {
"created_at": "2020-12-04 22:07:51",
"objects": [],
"created_by": "admin"
}
},
"result": true
}
"""
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
operation_id = convert_to_number("operation_id", operation_id)
return Response(OperationLogManager.diff_operation_log(operation_id))
@detail_route(methods=["get"], url_path="applied")
def applied(self, request, model_id):
"""
@api {get} /datamanage/datamodel/models/:model_id/applied/ 模型应用列表
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_applied
@apiDescription 数据模型应用列表
@apiParam {Int} model_id 模型ID
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": []
}
"""
return Response(True)
@detail_route(methods=["post"], url_path="top")
@params_valid(serializer=BkUserNameSerializer)
def top(self, request, model_id, params):
"""
@api {post} /datamanage/datamodel/models/:model_id/top/ *模型置顶
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_top
@apiDescription 模型置顶
@apiParam {String} bk_username 用户名
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": true
}
"""
model_id = convert_to_number("model_id", model_id)
bk_username = get_request_username()
top_ret = DataModelManager.top_data_model(model_id, bk_username)
return Response(top_ret)
@detail_route(methods=["post"], url_path="cancel_top")
@params_valid(serializer=BkUserNameSerializer)
def cancel_top(self, request, model_id, params):
"""
@api {post} /datamanage/datamodel/models/:model_id/cancel_top/ *模型取消置顶
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_cancel_top
@apiDescription 模型取消置顶
@apiParam {String} bk_username 用户名
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": true
}
"""
model_id = convert_to_number("model_id", model_id)
bk_username = get_request_username()
cancel_top_ret = DataModelManager.cancel_top_data_model(model_id, bk_username)
return Response(cancel_top_ret)
@detail_route(methods=["post"], url_path="confirm_overview")
@params_valid(serializer=BkUserNameSerializer)
def confirm_overview(self, request, model_id, params):
"""
@api {post} /datamanage/datamodel/models/:model_id/confirm_overview/ *确认模型预览
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_confirm_overview
@apiDescription 确认模型预览,记录已完成步骤(仅模型预览后点击下一步调用)
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"step_id": 4
}
}
"""
model_id = convert_to_number("model_id", model_id)
bk_username = get_request_username()
check_perm("datamodel.update", model_id)
step_id = DataModelManager.confirm_data_model_overview(model_id, bk_username)
return Response({"step_id": step_id})
@detail_route(methods=["post"], url_path="confirm_indicators")
@params_valid(serializer=BkUserNameSerializer)
def confirm_indicators(self, request, model_id, params):
"""
@api {post} /datamanage/datamodel/models/:model_id/confirm_indicators/ *确认指标
@apiVersion 3.5.0
@apiGroup DataModel_Model
@apiName datamodel_model_confirm_indicators
@apiDescription 确认指标设计,记录已完成步骤(仅指标设计页面点击下一步调用)
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"step_id": 3
}
}
"""
model_id = convert_to_number("model_id", model_id)
bk_username = get_request_username()
check_perm("datamodel.update", model_id)
step_id = DataModelManager.confirm_indicators(model_id, bk_username)
return Response({"step_id": step_id})
class MasterTableViewSet(APIViewSet):
@params_valid(serializer=MasterTableCreateSerializer)
def create(self, request, model_id, params):
"""
@api {post} /datamanage/datamodel/models/:model_id/master_tables/ *创建和修改主表
@apiVersion 3.5.0
@apiGroup DataModel_MasterTable
@apiName datamodel_master_table_update
@apiDescription 创建和修改主表
@apiParam {List} fields 模型主表字段列表
@apiParam {Int} fields.model_id 模型ID
@apiParam {Int} fields.field_name 字段名称
@apiParam {Int} fields.field_alias 字段别名
@apiParam {String} fields.field_index 字段位置
@apiParam {String} fields.field_type 数据类型
@apiParam {String} fields.field_category 字段类型
@apiParam {String} fields.description 字段描述
@apiParam {Json} fields.field_constraint_content 字段约束内容
@apiParam {Json} fields.field_clean_content 清洗规则
@apiParam {Int} fields.source_model_id 来源模型id
@apiParam {String} fields.source_field_name 来源字段
@apiParam {List} [model_relation] 模型主表关联信息
@apiParamExample {json} 参数样例:
{
"fields":[
{
"model_id":32,
"field_name":"price",
"field_alias":"道具价格",
"field_index":1,
"field_type":"long",
"field_category":"measure",
"description":"道具价格111",
"field_constraint_content":{
"op": "OR",
"groups": [
{
"op": "AND",
"items": [
{"constraint_id": "", "constraint_content": ""},
{"constraint_id": "", "constraint_content": ""}
]
},
{
"op": "OR",
"items": [
{"constraint_id": "", "constraint_content": ""},
{"constraint_id": "", "constraint_content": ""}
]
}
]
},
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"price * 100 as price"
},
"source_model_id":null,
"source_field_name":null
},
{
"model_id":32,
"field_name":"channel_id",
"field_alias":"渠道号",
"field_index":2,
"field_type":"string",
"field_category":"dimension",
"description":"渠道号",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null
},
{
"model_id":32,
"field_name":"channel_name",
"field_alias":"渠道名称",
"field_index":3,
"field_type":"string",
"field_category":"dimension",
"description":"渠道名称",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":33,
"source_field_name":"channel_id"
},
{
"model_id":32,
"field_name":"__time__",
"field_alias":"时间字段",
"field_index":4,
"field_type":"timestamp",
"field_category":"dimension",
"description":"平台内置时间字段,数据入库后将转换为可查询字段,比如 dtEventTime/dtEventTimeStamp/localtime",
"field_constraint_content":null,
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null
}
],
"model_relation":[
{
"model_id":32,
"field_name":"channel_id",
"related_model_id":33,
"related_field_name":"channel_id"
}
]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"step_id": 2
}
}
"""
model_id = convert_to_number("model_id", model_id)
bk_username = get_request_username()
check_perm("datamodel.update", model_id)
# 修改主表
master_table_dict = MasterTableManager.update_master_table(model_id, params, bk_username)
return Response(master_table_dict)
@params_valid(serializer=MasterTableListSerializer)
def list(self, request, model_id, params):
"""
@api {get} /datamanage/datamodel/models/:model_id/master_tables/ *主表详情
@apiVersion 3.5.0
@apiGroup DataModel_MasterTable
@apiName datamodel_master_table_info
@apiDescription 主表详情
@apiParam {Boolean} with_time_field 是否展示时间字段,默认不展示
@apiParam {List} allow_field_type 允许展示的字段类型
@apiParam {List} with_details 展示字段详情,示例:['deletable', 'editable']
@apiSuccess (返回) {Int} data.model_id 模型ID
@apiSuccess (返回) {String} data.model_name 模型名称
@apiSuccess (返回) {String} data.model_alias 模型别名
@apiSuccess (返回) {String} data.table_name 主表名称
@apiSuccess (返回) {String} data.table_alias 主表别名
@apiSuccess (返回) {String} data.step_id 模型构建&发布完成步骤
@apiSuccess (返回) {List} data.fields 主表字段信息
@apiSuccess (返回) {Int} data.fields.model_id 模型ID
@apiSuccess (返回) {String} data.fields.field_name 字段名称
@apiSuccess (返回) {String} data.fields.field_alias 字段别名
@apiSuccess (返回) {Int} data.fields.field_index 字段位置
@apiSuccess (返回) {String} data.fields.field_type 数据类型
@apiSuccess (返回) {String} data.fields.field_category 字段类型
@apiSuccess (返回) {String} data.fields.description 字段描述
@apiSuccess (返回) {List} data.fields.field_constraint_content 字段约束内容
@apiSuccess (返回) {Json} data.fields.field_clean_content 清洗规则
@apiSuccess (返回) {Int} data.fields.source_model_id 来源模型id
@apiSuccess (返回) {String} data.fields.source_field_name 来源字段
@apiSuccess (返回) {Boolean} data.fields.is_join_field 是否关联字段
@apiSuccess (返回) {Boolean} data.fields.is_extended_field 是否扩展字段
@apiSuccess (返回) {String} data.fields.join_field_name 扩展字段对应的关联字段名称
@apiSuccess (返回) {String} data.fields.deletable 字段能否被删除
@apiSuccess (返回) {String} data.fields.editable 字段能否被编辑
@apiSuccess (返回) {String} data.fields.editable_deletable_info 字段能否被编辑、被删除详情
@apiSuccess (返回) {String} data.fields.editable_deletable_info.is_join_field 字段是否关联维度模型
@apiSuccess (返回) {String} data.fields.editable_deletable_info.is_source_field 字段是否被模型作为扩展字段
@apiSuccess (返回) {String} data.fields.editable_deletable_info.source_field_models 字段被什么模型作为扩展字段
@apiSuccess (返回) {String} data.fields.editable_deletable_info.is_used_by_other_fields 字段是否被其他字段加工逻辑引用
@apiSuccess (返回) {String} data.fields.editable_deletable_info.fields 字段被什么字段加工逻辑引用
@apiSuccess (返回) {String} data.fields.editable_deletable_info.is_used_by_calc_atom 字段是否被统计口径引用
@apiSuccess (返回) {String} data.fields.editable_deletable_info.calculation_atoms 字段被什么统计口径引用
@apiSuccess (返回) {String} data.fields.editable_deletable_info.is_aggregation_field 字段是否是指标聚合字段
@apiSuccess (返回) {String} data.fields.editable_deletable_info.aggregation_field_indicators 字段是什么指标聚合字段
@apiSuccess (返回) {String} data.fields.editable_deletable_info.is_condition_field 字段是否是指标过滤条件字段
@apiSuccess (返回) {String} data.fields.editable_deletable_info.condition_field_indicators 是什么指标过滤条件字段
@apiSuccess (返回) {List} data.model_relation 模型关联关系
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"model_id":1,
"model_name":"fact_item_flow",
"model_alias":"道具流水表",
"table_name":"fact_item_flow",
"table_alias":"道具流水表",
"fields":[
{
"model_id": 1,
"id": 1,
"field_name":"price",
"field_alias":"道具价格",
"field_index": 1,
"field_type":"long",
"field_category":"metric",
"description":"道具价格",
"field_constraint_content":[
{
"content": {
"constraint_id": "value_enum",
"constraint_content": "1,2"
}
}
],
"field_clean_content":{
"clean_option":"SQL",
"clean_content":"price * 100 as price"
},
"source_model_id":null,
"source_field_name":null,
"is_join_field": false,
"is_extended_field": false,
"join_field_name": null,
"deletable": True,
"editable": True,
"editable_deletable_info": {
"source_field_models": [],
"is_source_field": false,
"is_used_by_calc_atom": false,
"aggregation_field_indicators": [],
"is_aggregation_field": false,
"is_used_by_other_fields": false,
"is_join_field": true,
"condition_field_indicators": [],
"calculation_atoms": [],
"fields": [],
"is_condition_field": false
}
},
{
"model_id": 1,
"id": 2,
"field_name":"channel_id",
"field_alias":"渠道号",
"field_index": 2,
"field_type":"string",
"field_category":"dimension",
"description":"渠道号",
"field_constraint_content":[],
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null,
"is_join_field": true,
"is_extended_field": false,
"join_field_name": null,
},
{
"model_id":32,
"field_name":"__time__",
"field_alias":"时间字段",
"field_index":4,
"field_type":"timestamp",
"field_category":"dimension",
"description":"平台内置时间字段,数据入库后转换为可查询字段,如dtEventTime/dtEventTimeStamp/localtime",
"field_constraint_content":[],
"field_clean_content":null,
"source_model_id":null,
"source_field_name":null,
"is_join_field": false,
"is_extended_field": false,
"join_field_name": null,
}
],
"model_relation":[
{
"model_id": 1
"field_name":"channel_id",
"related_model_id":2,
"related_field_name":"channel_id",
"related_method":"left-join"
}
]
}
"""
# 主表详情
model_id = convert_to_number("model_id", model_id)
check_perm("datamodel.retrieve", model_id)
with_time_field = params["with_time_field"]
allow_field_type = params["allow_field_type"]
with_details = params["with_details"]
latest_version = params["latest_version"]
# 返回草稿态主表信息
master_table_dict = MasterTableManager.get_master_table_info(
model_id, with_time_field, allow_field_type, with_details, latest_version
)
return Response(master_table_dict)
class FieldConstraintConfigViewSet(APIModelViewSet):
def list(self, request):
"""
@api {get} /datamanage/datamodel/field_constraint_configs/ *字段约束配置列表
@apiVersion 3.5.0
@apiGroup FieldConstraintConfig
@apiName datamodel_field_constraint_config_list
@apiDescription 字段约束配置列表
@apiSuccess (返回) {String} data.constraint_id 字段约束英文名
@apiSuccess (返回) {String} data.constraint_name 字段约束中文名
@apiSuccess (返回) {String} data.constraint_value 字段约束内容/示例,例如:(100,200]
@apiSuccess (返回) {Boolean} data.editable 字段约束内容是否可编辑
@apiSuccess (返回) {String} data.constraint_type 字段约束类型:general:通用,specific:特定
@apiSuccess (返回) {Json} data.validator 约束校验内容
@apiSuccess (返回) {Boolean} data.description 约束说明
@apiSuccess (返回) {List} data.allow_field_type 允许的数据类型
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": [
{
"description": null,
"constraint_id": "start_with",
"editable": true,
"constraint_type": "general",
"constraint_value": "http",
"constraint_name": "开头是",
"validator": {
"content": null,
"type": "string_validator"
},
"allow_field_type": [
"string"
]
}
]
}
"""
constraint_list = get_field_constraint_tree_list()
return Response(constraint_list)
class FieldTypeConfigViewSet(APIModelViewSet):
@params_valid(serializer=FieldTypeListSerializer)
def list(self, request, params):
"""
@api {get} /datamanage/datamodel/field_type_configs/ *数据类型配置列表
@apiVersion 3.5.0
@apiGroup FieldConstraintConfig
@apiName datamodel_field_type_config_list
@apiDescription 数据类型配置列表
@apiParam {List} include_field_type 额外返回的数据类型列表
@apiParam {List} exclude_field_type 不返回的数据类型列表
@apiSuccess (返回) {String} data.field_type 数据类型ID
@apiSuccess (返回) {String} data.field_type_alias 数据类型中文名
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": [
{
"field_type":"double",
"field_type_alias":"浮点型"
}
]
}
"""
include_field_type = params["include_field_type"]
exclude_field_type = params["exclude_field_type"]
field_type_list = get_field_type_configs(include_field_type, exclude_field_type)
return Response(field_type_list)
class StandardFieldViewSet(APIModelViewSet):
def list(self, request):
"""
@api {get} /datamanage/datamodel/standard_fields/ 公共字段列表
@apiVersion 3.5.0
@apiGroup Datamodel_StandardField
@apiName datamodel_standard_field_list
@apiDescription 公共字段列表,后期要考虑按照模型tag、schema相似度做字段推荐
@apiParam {String} [fuzzy] 模糊过滤
@apiParam {String} [field_name] 字段名称
@apiSuccess (返回) {String} data.field_name 字段名称
@apiSuccess (返回) {String} data.field_alias 字段别名
@apiSuccess (返回) {String} data.field_type 字段类型
@apiSuccess (返回) {String} data.field_category 字段分类
@apiSuccess (返回) {String} data.description 字段描述
@apiSuccess (返回) {String} data.field_constraint_content 字段约束内容
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": [
{
"field_name": "zone_id",
"field_alias": "大区ID",
"field_type": "string",
"field_category": "dimension",
"description": "大区ID",
"field_constraint_content": null
}
]
}
"""
return Response()
class ResultTableViewSet(APIViewSet):
lookup_field = "rt_id"
@detail_route(methods=["get"], url_path="fields")
@params_valid(serializer=ResultTableFieldListSerializer)
def fields(self, request, rt_id, params):
"""
@api {get} /datamanage/datamodel/result_tables/:rt_id/fields/ rt字段列表
@apiVersion 3.5.0
@apiGroup ResultTableField
@apiName datamodel_result_table_fields_list
@apiDescription rt字段列表,默认不返回时间类型字段
@apiParam {Boolean} [with_time_field] 是否返回时间类型字段,默认不返回
@apiSuccess (返回) {String} data.field_name 字段名称
@apiSuccess (返回) {String} data.field_alias 字段别名
@apiSuccess (返回) {String} data.field_type 字段类型
@apiSuccess (返回) {String} data.field_category 字段分类
@apiSuccess (返回) {String} data.description 字段描述
@apiSuccess (返回) {String} data.field_constraint_content 字段约束内容
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": [
{
"field_type": "string",
"field_alias": "client_ip",
"description": "client_ip",
"roles": {
"event_time": false
},
"created_at": "2019-03-15 22:13:16",
"is_dimension": false,
"created_by": "admin",
"updated_at": "2019-03-15 22:13:16",
"origins": "",
"field_name": "client_ip",
"id": 11199,
"field_index": 2,
"updated_by": ""
}
]
}
"""
fields = MetaApi.result_tables.fields({"result_table_id": rt_id}, raise_exception=True).data
if params["with_time_field"]:
return Response(fields)
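# Otherwise strip the platform time field type and built-in internal field names,
# then append the canonical time-field placeholder so callers always get one time column.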
filtered_fields = [
field_dict
for field_dict in fields
if not (
field_dict["field_type"] == TimeField.TIME_FIELD_TYPE
or field_dict["field_name"].lower() in InnerField.INNER_FIELD_LIST
or field_dict["field_name"] in InnerField.INNER_FIELD_LIST
)
]
filtered_fields.append(TimeField.TIME_FIELD_DICT)
return Response(filtered_fields)
| 1.203125
| 1
|
schedule/transformData/transformContext.py
|
JaviMiot/employeeSchedule
| 0
|
12782299
|
<gh_stars>0
from .transformData import TransformData
class TransformContext:
def __init__(self, strategy: TransformData):
self._strategy = strategy
@property
def strategy(self) -> TransformData:
return self._strategy
@strategy.setter
def strategy(self, strategy: TransformData):
self._strategy = strategy
def execute(self, data: dict):
return self._strategy.convertDict(data)
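# Illustrative usage sketch (CsvTransform / JsonTransform are hypothetical TransformData subclasses):
# context = TransformContext(CsvTransform())
# rows = context.execute({"employee": "Alice", "shift": "night"})
# context.strategy = JsonTransform()  # strategies can be swapped at runtime via the setter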
| 2.4375
| 2
|
src/app/voltdb/voltdb_src/tests/scripts/Testvoltdbclient.py
|
OpenMPDK/SMDK
| 44
|
12782300
|
<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8
# This file is part of VoltDB.
# Copyright (C) 2008-2021 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
# ensure version 3.6+ of python
if sys.hexversion < 0x03060000:
sys.stderr.write("Python version 3.6 or greater is required.\n" +
"Please install a more recent Python release and retry.\n")
sys.exit(-1)
# add the path to the volt python client, just based on knowing
# where we are now
sys.path.append('../../lib/python')
import signal
import unittest
import datetime
import decimal
import socket
import threading
import struct
import subprocess
import time
import array
from voltdbclient import *
SERVER_NAME = "EchoServer"
decimal.getcontext().prec = 19
def signalHandler(server, signum, frame):
server.shutdown()
server.join()
raise Exception("Interrupted by SIGINT.")
class EchoServer(threading.Thread):
def __init__(self, cmd, lock):
threading.Thread.__init__(self)
self.__server_cmd = cmd
self.__lock = threading.Event()
self.__start = lock
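# __start is set by run() once the echo server subprocess has had a moment to come up;
# __lock is set by shutdown() and tells run() to locate the Java server via jps and kill it.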
def run(self):
server = subprocess.Popen(self.__server_cmd, shell=True, encoding='utf-8')
time.sleep(1)
self.__start.set()
self.__lock.wait()
# Get the server pid
jps = subprocess.Popen("jps", stdout=subprocess.PIPE, shell=True, encoding='utf-8')
(stdout, stderr) = jps.communicate()
pid = None
lines = stdout.split("\n")
for l in lines:
if SERVER_NAME in l:
pid = l.split()[0]
if pid is None:
return
# Should kill the server now
killer = subprocess.Popen("kill -9 %s" % (pid), shell=True, encoding='utf-8')
killer.communicate()
if killer.returncode != 0:
sys.stderr.write("Failed to kill the server process %d\n" % server.pid)
return
server.communicate()
def shutdown(self):
self.__lock.set()
class TestFastSerializer(unittest.TestCase):
byteArray = [None, 1, -21, 127]
int16Array = [None, 128, -256, 32767]
int32Array = [None, 0, -32768, 2147483647]
int64Array = [None, -52423, 2147483647, -9223372036854775807]
floatArray = [None, float("-inf"), float("nan"), -0.009999999776482582]
stringArray = [None, u"hello world", u"ça"]
binArray = [None, array.array('B', [0, 128, 255])]
dateArray = [None, datetime.datetime.now(),
datetime.datetime.utcfromtimestamp(0),
datetime.datetime.utcnow()]
decimalArray = [None,
decimal.Decimal("-837461"),
decimal.Decimal("8571391.193847158139"),
decimal.Decimal("-1348392.109386749180")]
ARRAY_BEGIN = 126
ARRAY_END = 127
def setUp(self):
self.fs = FastSerializer('localhost', 21212, None, None)
def tearDown(self):
self.fs.socket.close()
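# The helpers below round-trip a value through the echo server: serialize and flush it,
# read the echoed bytes back, and assert the deserialized value equals what was sent.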
def sendAndCompare(self, type, value):
self.fs.writeWireType(type, value)
self.fs.prependLength()
self.fs.flush()
self.fs.bufferForRead()
t = self.fs.readByte()
self.assertEqual(t, type)
v = self.fs.read(type)
self.assertEqual(v, value)
def sendArrayAndCompare(self, type, value):
self.fs.writeWireTypeArray(type, value)
sys.stdout.flush()
self.fs.prependLength()
sys.stdout.flush()
self.fs.flush()
sys.stdout.flush()
self.fs.bufferForRead()
sys.stdout.flush()
self.assertEqual(self.fs.readByte(), type)
sys.stdout.flush()
self.assertEqual(list(self.fs.readArray(type)), value)
sys.stdout.flush()
def testByte(self):
for i in self.byteArray:
self.sendAndCompare(self.fs.VOLTTYPE_TINYINT, i)
def testShort(self):
for i in self.int16Array:
self.sendAndCompare(self.fs.VOLTTYPE_SMALLINT, i)
def testInt(self):
for i in self.int32Array:
self.sendAndCompare(self.fs.VOLTTYPE_INTEGER, i)
def testLong(self):
for i in self.int64Array:
self.sendAndCompare(self.fs.VOLTTYPE_BIGINT, i)
def testFloat(self):
type = self.fs.VOLTTYPE_FLOAT
for i in self.floatArray:
self.fs.writeWireType(type, i)
self.fs.prependLength()
self.fs.flush()
self.fs.bufferForRead()
self.assertEqual(self.fs.readByte(), type)
result = self.fs.readFloat64()
if isNaN(i):
self.assertTrue(isNaN(result))
else:
self.assertEqual(result, i)
def testString(self):
for i in self.stringArray:
self.sendAndCompare(self.fs.VOLTTYPE_STRING, i)
def testDate(self):
for i in self.dateArray:
self.sendAndCompare(self.fs.VOLTTYPE_TIMESTAMP, i)
def testDecimal(self):
for i in self.decimalArray:
self.sendAndCompare(self.fs.VOLTTYPE_DECIMAL, i)
def testArray(self):
self.fs.writeByte(self.ARRAY_BEGIN)
self.fs.prependLength()
self.fs.flush()
self.sendArrayAndCompare(self.fs.VOLTTYPE_TINYINT, self.byteArray)
self.sendArrayAndCompare(self.fs.VOLTTYPE_SMALLINT, self.int16Array)
self.sendArrayAndCompare(self.fs.VOLTTYPE_INTEGER, self.int32Array)
self.sendArrayAndCompare(self.fs.VOLTTYPE_BIGINT, self.int64Array)
self.sendArrayAndCompare(self.fs.VOLTTYPE_STRING, self.stringArray)
self.sendArrayAndCompare(self.fs.VOLTTYPE_TIMESTAMP, self.dateArray)
self.sendArrayAndCompare(self.fs.VOLTTYPE_DECIMAL, self.decimalArray)
self.fs.writeByte(self.ARRAY_END)
self.fs.prependLength()
self.fs.flush()
def testTable(self):
type = FastSerializer.VOLTTYPE_VOLTTABLE
table = VoltTable(self.fs)
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_TINYINT,
name = "id"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_BIGINT,
name = "bigint"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_STRING,
name = "name"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_VARBINARY,
name = "bin"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_TIMESTAMP,
name = "date"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_DECIMAL,
name = "money"))
table.tuples.append([self.byteArray[1], self.int64Array[2],
self.stringArray[0], self.binArray[0], self.dateArray[2],
self.decimalArray[0]])
table.tuples.append([self.byteArray[2], self.int64Array[1],
self.stringArray[2], self.binArray[1], self.dateArray[1],
self.decimalArray[1]])
#table.tuples.append([self.byteArray[0], self.int64Array[0],
# self.stringArray[1], self.binArray[1], self.dateArray[0],
# self.decimalArray[2]])
self.fs.writeByte(type)
table.writeToSerializer()
self.fs.prependLength()
self.fs.flush()
self.fs.bufferForRead()
self.assertEqual(self.fs.readByte(), type)
result = VoltTable(self.fs)
result.readFromSerializer()
self.assertEqual(result, table)
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit(-1)
lock = threading.Event()
echo = EchoServer(sys.argv[1], lock)
handler = lambda x, y: signalHandler(echo, x, y)
signal.signal(signal.SIGINT, handler)
echo.start()
lock.wait()
del sys.argv[1]
try:
unittest.main()
except SystemExit:
echo.shutdown()
echo.join()
raise
| 1.820313
| 2
|
saleor/shipping/utils.py
|
fairhopeweb/saleor
| 15,337
|
12782301
|
<gh_stars>1000+
from typing import TYPE_CHECKING, Optional
from django_countries import countries
from .interface import ShippingMethodData
if TYPE_CHECKING:
from .models import ShippingMethod
def default_shipping_zone_exists(zone_pk=None):
from .models import ShippingZone
return ShippingZone.objects.exclude(pk=zone_pk).filter(default=True)
def get_countries_without_shipping_zone():
"""Return countries that are not assigned to any shipping zone."""
from .models import ShippingZone
covered_countries = set()
for zone in ShippingZone.objects.all():
covered_countries.update({c.code for c in zone.countries})
return (country[0] for country in countries if country[0] not in covered_countries)
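# Illustrative use (a sketch, not part of the module):
# uncovered = list(get_countries_without_shipping_zone())  # country codes not covered by any zone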
def convert_to_shipping_method_data(
shipping_method: Optional["ShippingMethod"],
) -> Optional["ShippingMethodData"]:
if not shipping_method:
return None
return ShippingMethodData(
id=str(shipping_method.id),
name=shipping_method.name,
price=getattr(shipping_method, "price", None),
description=shipping_method.description,
type=shipping_method.type,
excluded_products=shipping_method.excluded_products,
channel_listings=shipping_method.channel_listings,
minimum_order_weight=shipping_method.minimum_order_weight,
maximum_order_weight=shipping_method.maximum_order_weight,
maximum_delivery_days=shipping_method.maximum_delivery_days,
minimum_delivery_days=shipping_method.minimum_delivery_days,
metadata=shipping_method.metadata,
private_metadata=shipping_method.private_metadata,
)
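# Minimal usage sketch, assuming `method` is a ShippingMethod instance loaded elsewhere:
# data = convert_to_shipping_method_data(method)
# if data is not None:
#     print(data.name, data.price)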
| 2.484375
| 2
|
src/vartools/dynamical_systems/__init__.py
|
hubernikus/various_tools
| 0
|
12782302
|
<reponame>hubernikus/various_tools
"""
The :mod:`DynamicalSystem` module implements mixture modeling algorithms.
"""
# Various Dynamical Systems
from ._base import allow_max_velocity, DynamicalSystem
from .linear import LinearSystem, ConstantValue
from .circle_stable import CircularStable
from .circular_and_linear import CircularLinear
from .spiral_motion import SpiralStable
from .locally_rotated import LocallyRotated
from .quadratic_axis_convergence import QuadraticAxisConvergence
from .multiattractor_dynamics import PendulumDynamics, DuffingOscillator, BifurcationSpiral
from .sinus_attractor import SinusAttractorSystem
# Various Dynamical System Adaptation Functions
from .velocity_trimmer import BaseTrimmer, ConstVelocityDecreasingAtAttractor
# Helper functions for visualization
from .plot_vectorfield import plot_dynamical_system
from .plot_vectorfield import plot_dynamical_system_quiver
from .plot_vectorfield import plot_dynamical_system_streamplot
__all__ = ['allow_max_velocity',
'DynamicalSystem',
'LinearSystem',
'ConstantValue',
'CircularStable',
'SpiralStable',
'LocallyRotated',
'QuadraticAxisConvergence',
'PendulumDynamics',
'DuffingOscillator',
'BifurcationSpiral',
'SinusAttractorSystem',
'BaseTrimmer',
'ConstVelocityDecreasingAtAttractor',
'plot_dynamical_system',
'plot_dynamical_system_quiver',
'plot_dynamical_system_streamplot',
]
| 1.640625
| 2
|
NLP/roberta/tokenizer/Conversation.py
|
x54-729/models
| 0
|
12782303
|
import uuid
from typing import List, Optional
from .utils import logging
logger = logging.get_logger(__name__)
class Conversation:
"""
Utility class containing a conversation and its history. This class is meant to be used as an input to the
:class:`~transformers.ConversationalPipeline`. The conversation contains a number of utility function to manage the
addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input
before being passed to the :class:`~transformers.ConversationalPipeline`. This user input is either created when
the class is instantiated, or by calling :obj:`conversational_pipeline.append_response("input")` after a
conversation turn.
Arguments:
text (:obj:`str`, `optional`):
The initial user input to start the conversation. If not provided, a user input needs to be provided
manually using the :meth:`~transformers.Conversation.add_user_input` method before the conversation can
begin.
conversation_id (:obj:`uuid.UUID`, `optional`):
Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
conversation.
past_user_inputs (:obj:`List[str]`, `optional`):
Eventual past history of the conversation of the user. You don't need to pass it manually if you use the
pipeline interactively but if you want to recreate history you need to set both :obj:`past_user_inputs` and
:obj:`generated_responses` with equal length lists of strings
generated_responses (:obj:`List[str]`, `optional`):
Eventual past history of the conversation of the model. You don't need to pass it manually if you use the
pipeline interactively but if you want to recreate history you need to set both :obj:`past_user_inputs` and
:obj:`generated_responses` with equal length lists of strings
Usage::
conversation = Conversation("Going to the movies tonight - any suggestions?")
# Steps usually performed by the model when generating a response:
# 1. Mark the user input as processed (moved to the history)
conversation.mark_processed()
# 2. Append a mode response
conversation.append_response("The Big lebowski.")
conversation.add_user_input("Is it good?")
"""
def __init__(
self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
):
if not conversation_id:
conversation_id = uuid.uuid4()
if past_user_inputs is None:
past_user_inputs = []
if generated_responses is None:
generated_responses = []
self.uuid: uuid.UUID = conversation_id
self.past_user_inputs: List[str] = past_user_inputs
self.generated_responses: List[str] = generated_responses
self.new_user_input: Optional[str] = text
def __eq__(self, other):
if not isinstance(other, Conversation):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def add_user_input(self, text: str, overwrite: bool = False):
"""
Add a user input to the conversation for the next round. This populates the internal :obj:`new_user_input`
field.
Args:
text (:obj:`str`): The user input for the next conversation round.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not existing and unprocessed user input should be overwritten when this function is called.
"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".'
)
self.new_user_input = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
)
else:
self.new_user_input = text
def mark_processed(self):
"""
Mark the conversation as processed (moves the content of :obj:`new_user_input` to :obj:`past_user_inputs`) and
empties the :obj:`new_user_input` field.
"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
self.new_user_input = None
def append_response(self, response: str):
"""
Append a response to the list of generated responses.
Args:
response (:obj:`str`): The model generated response.
"""
self.generated_responses.append(response)
def iter_texts(self):
"""
Iterates over all blobs of the conversation.
Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. ``is_user`` is a
:obj:`bool`, ``text_chunks`` is a :obj:`str`.
"""
for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__(self):
"""
Generates a string representation of the conversation.
Return:
:obj:`str`:
Example: Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user >> Going to the movies tonight - any
suggestions? bot >> The Big Lebowski
"""
output = f"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
name = "user" if is_user else "bot"
output += f"{name} >> {text} \n"
return output
| 3.703125
| 4
|
query.py
|
scotte216/Data-Stream
| 0
|
12782304
|
import argparse
from datetime import datetime
from Common.functions import add_data, get_filtered_stb, get_data
parser = argparse.ArgumentParser(description='Data-stream import and searching. Expected input data-stream line\n' +
'of the form: STB|TITLE|PROVIDER|DATE|REVENUE|TIME\n')
parser.add_argument('-i', dest='filename', help='import FILENAME to import data to datastore')
parser.add_argument('-s', dest='select',
help='SELECT from comma separated list of columns (STB,TITLE,PROVIDER,DATE,REV,TIME)')
parser.add_argument('-f', dest='filter',
help='FILTER from one column=value pair. CASE SENSITIVE. ex -f date=2017-04-21')
parser.add_argument('-o', dest='order',
help='ORDER from comma separated list of columns (STB,TITLE,PROVIDER,DATE,REV,TIME)')
args = parser.parse_args()
"""
If importing data:
Import data stream from argument filename. Expected format:
STB|TITLE|PROVIDER|DATE|REVENUE|TIME\n
"""
if args.filename:
count = 0
with open(args.filename, 'r') as file:
for line in file:
try:
box_id, title, provider, date, revenue, time = line.rstrip('\r\n').split('|')
time = datetime.strptime(time, '%H:%M')
date = datetime.strptime(date, '%Y-%m-%d')
data = {
'stb': box_id,
'date': date.strftime('%Y-%m-%d'),
'title': title,
'provider': provider,
'rev': "{0:.2f}".format(float(revenue)),
'time': time.strftime('%H:%M')
}
add_data(data)
count += 1
except ValueError:
print("Malformed line. Skipping.")
print("Imported {} records.".format(count))
# Else, retrieving data. Data retrieval from SELECT, FILTER, and ORDER arguments
else:
# Error checking retrieval arguments
columns = {'stb', 'title', 'provider', 'date', 'rev', 'time'}
selection = args.select.lower().split(',') if args.select else None
if not selection or not set(selection) <= columns:
print("Invalid SELECT argument(s). See --help for help.")
exit(1)
order = args.order.lower().split(',') if args.order else None
if order and not set(order) <= columns and not set(order) <= set(selection):
print("Invalid ORDER arguments(s). See --help for help.")
exit(1)
filter_by = ()
if args.filter:
key, value = tuple(args.filter.split('='))
if key not in columns:
print("Invalid FILTER argument(s). See --help for help.")
exit(1)
if key == 'rev':
try:
value = "{0:.2f}".format(float(value))
except ValueError:
print("Invalid number for rev filter.")
exit(1)
filter_by = (key, value)
# Retrieve set of matching STB id numbers based on the filter
matching_stb = get_filtered_stb(filter_by)
# If there are any matching STB id numbers, get actual data, order, and print SELECT results.
if matching_stb:
results = get_data(matching_stb, selection, filter_by, order)
# Print results in order of SELECT
for entry in results:
print(','.join([entry[key] for key in selection]))
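# Illustrative CLI invocations (the data file name is hypothetical):
# python query.py -i stream.psv
# python query.py -s stb,title,rev -f date=2017-04-21 -o rev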
| 2.90625
| 3
|
demo_package/__init__.py
|
xiaocai2333/setuptools_demo
| 0
|
12782305
|
def demo():
print("This is a test package demo!")
if __name__ == '__main__':
demo()
| 1.28125
| 1
|
Lib/site-packages/psycopg/pq/pq_ctypes.py
|
CirculusVCFB/example-fastapi
| 0
|
12782306
|
<filename>Lib/site-packages/psycopg/pq/pq_ctypes.py
"""
libpq Python wrapper using ctypes bindings.
Clients shouldn't use this module directly, unless for testing: they should use
the `pq` module instead, which is in charge of choosing the best
implementation.
"""
# Copyright (C) 2020 The Psycopg Team
import logging
from os import getpid
from weakref import ref
from functools import partial
from ctypes import Array, pointer, string_at, create_string_buffer, byref
from ctypes import addressof, c_char_p, c_int, c_size_t, c_ulong
from typing import Any, Callable, List, Optional, Sequence, Tuple
from typing import cast as t_cast, TYPE_CHECKING
from .. import errors as e
from . import _pq_ctypes as impl
from .misc import PGnotify, ConninfoOption, PGresAttDesc
from .misc import error_message, connection_summary
from ._enums import Format, ExecStatus
# Imported locally to call them from __del__ methods
from ._pq_ctypes import PQclear, PQfinish, PQfreeCancel, PQstatus
if TYPE_CHECKING:
from . import abc
__impl__ = "python"
logger = logging.getLogger("psycopg")
def version() -> int:
"""Return the version number of the libpq currently loaded.
The number is in the same format of `~psycopg.ConnectionInfo.server_version`.
Certain features might not be available if the libpq library used is too old.
"""
return impl.PQlibVersion()
def notice_receiver(
arg: Any, result_ptr: impl.PGresult_struct, wconn: "ref[PGconn]"
) -> None:
pgconn = wconn()
if not (pgconn and pgconn.notice_handler):
return
res = PGresult(result_ptr)
try:
pgconn.notice_handler(res)
except Exception as exc:
logger.exception("error in notice receiver: %s", exc)
res._pgresult_ptr = None # avoid destroying the pgresult_ptr
class PGconn:
"""
Python representation of a libpq connection.
"""
__slots__ = (
"_pgconn_ptr",
"notice_handler",
"notify_handler",
"_notice_receiver",
"_procpid",
"__weakref__",
)
def __init__(self, pgconn_ptr: impl.PGconn_struct):
self._pgconn_ptr: Optional[impl.PGconn_struct] = pgconn_ptr
self.notice_handler: Optional[Callable[["abc.PGresult"], None]] = None
self.notify_handler: Optional[Callable[[PGnotify], None]] = None
self._notice_receiver = impl.PQnoticeReceiver( # type: ignore
partial(notice_receiver, wconn=ref(self))
)
impl.PQsetNoticeReceiver(pgconn_ptr, self._notice_receiver, None)
self._procpid = getpid()
def __del__(self) -> None:
# Close the connection only if it was created in this process,
# not if this object is being GC'd after fork.
if getpid() == self._procpid:
self.finish()
def __repr__(self) -> str:
cls = f"{self.__class__.__module__}.{self.__class__.__qualname__}"
info = connection_summary(self)
return f"<{cls} {info} at 0x{id(self):x}>"
@classmethod
def connect(cls, conninfo: bytes) -> "PGconn":
if not isinstance(conninfo, bytes):
raise TypeError(f"bytes expected, got {type(conninfo)} instead")
pgconn_ptr = impl.PQconnectdb(conninfo)
if not pgconn_ptr:
raise MemoryError("couldn't allocate PGconn")
return cls(pgconn_ptr)
@classmethod
def connect_start(cls, conninfo: bytes) -> "PGconn":
if not isinstance(conninfo, bytes):
raise TypeError(f"bytes expected, got {type(conninfo)} instead")
pgconn_ptr = impl.PQconnectStart(conninfo)
if not pgconn_ptr:
raise MemoryError("couldn't allocate PGconn")
return cls(pgconn_ptr)
def connect_poll(self) -> int:
return self._call_int(impl.PQconnectPoll)
def finish(self) -> None:
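# Clear the stored pointer first so repeated finish() calls are no-ops and the
# underlying PGconn struct is freed exactly once.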
self._pgconn_ptr, p = None, self._pgconn_ptr
if p:
PQfinish(p)
@property
def pgconn_ptr(self) -> Optional[int]:
"""The pointer to the underlying ``PGconn`` structure, as integer.
`!None` if the connection is closed.
The value can be used to pass the structure to libpq functions which
psycopg doesn't (currently) wrap, either in C or in Python using FFI
libraries such as `ctypes`.
"""
if self._pgconn_ptr is None:
return None
return addressof(self._pgconn_ptr.contents) # type: ignore[attr-defined]
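    # Illustrative sketch (not from the original module): the integer returned
    # above can be handed to libpq through ctypes, e.g. (the library name and
    # the connected `conn` instance are assumptions):
    #
    #   libpq = ctypes.CDLL("libpq.so.5")
    #   libpq.PQconnectionNeedsPassword(ctypes.c_void_p(conn.pgconn_ptr))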
@property
def info(self) -> List["ConninfoOption"]:
self._ensure_pgconn()
opts = impl.PQconninfo(self._pgconn_ptr)
if not opts:
raise MemoryError("couldn't allocate connection info")
try:
return Conninfo._options_from_array(opts)
finally:
impl.PQconninfoFree(opts)
def reset(self) -> None:
self._ensure_pgconn()
impl.PQreset(self._pgconn_ptr)
def reset_start(self) -> None:
if not impl.PQresetStart(self._pgconn_ptr):
raise e.OperationalError("couldn't reset connection")
def reset_poll(self) -> int:
return self._call_int(impl.PQresetPoll)
@classmethod
    def ping(cls, conninfo: bytes) -> int:
if not isinstance(conninfo, bytes):
raise TypeError(f"bytes expected, got {type(conninfo)} instead")
return impl.PQping(conninfo)
@property
def db(self) -> bytes:
return self._call_bytes(impl.PQdb)
@property
def user(self) -> bytes:
return self._call_bytes(impl.PQuser)
@property
def password(self) -> bytes:
return self._call_bytes(impl.PQpass)
@property
def host(self) -> bytes:
return self._call_bytes(impl.PQhost)
@property
def hostaddr(self) -> bytes:
return self._call_bytes(impl.PQhostaddr)
@property
def port(self) -> bytes:
return self._call_bytes(impl.PQport)
@property
def tty(self) -> bytes:
return self._call_bytes(impl.PQtty)
@property
def options(self) -> bytes:
return self._call_bytes(impl.PQoptions)
@property
def status(self) -> int:
return PQstatus(self._pgconn_ptr)
@property
def transaction_status(self) -> int:
return impl.PQtransactionStatus(self._pgconn_ptr)
def parameter_status(self, name: bytes) -> Optional[bytes]:
self._ensure_pgconn()
return impl.PQparameterStatus(self._pgconn_ptr, name)
@property
def error_message(self) -> bytes:
return impl.PQerrorMessage(self._pgconn_ptr)
@property
def protocol_version(self) -> int:
return self._call_int(impl.PQprotocolVersion)
@property
def server_version(self) -> int:
return self._call_int(impl.PQserverVersion)
@property
def socket(self) -> int:
rv = self._call_int(impl.PQsocket)
if rv == -1:
raise e.OperationalError("the connection is lost")
return rv
@property
def backend_pid(self) -> int:
return self._call_int(impl.PQbackendPID)
@property
def needs_password(self) -> bool:
return bool(impl.PQconnectionNeedsPassword(self._pgconn_ptr))
@property
def used_password(self) -> bool:
return bool(impl.PQconnectionUsedPassword(self._pgconn_ptr))
@property
def ssl_in_use(self) -> bool:
return self._call_bool(impl.PQsslInUse)
def exec_(self, command: bytes) -> "PGresult":
if not isinstance(command, bytes):
raise TypeError(f"bytes expected, got {type(command)} instead")
self._ensure_pgconn()
rv = impl.PQexec(self._pgconn_ptr, command)
if not rv:
raise MemoryError("couldn't allocate PGresult")
return PGresult(rv)
def send_query(self, command: bytes) -> None:
if not isinstance(command, bytes):
raise TypeError(f"bytes expected, got {type(command)} instead")
self._ensure_pgconn()
if not impl.PQsendQuery(self._pgconn_ptr, command):
raise e.OperationalError(
f"sending query failed: {error_message(self)}"
)
def exec_params(
self,
command: bytes,
param_values: Optional[Sequence[Optional[bytes]]],
param_types: Optional[Sequence[int]] = None,
param_formats: Optional[Sequence[int]] = None,
result_format: int = Format.TEXT,
) -> "PGresult":
args = self._query_params_args(
command, param_values, param_types, param_formats, result_format
)
self._ensure_pgconn()
rv = impl.PQexecParams(*args)
if not rv:
raise MemoryError("couldn't allocate PGresult")
return PGresult(rv)
def send_query_params(
self,
command: bytes,
param_values: Optional[Sequence[Optional[bytes]]],
param_types: Optional[Sequence[int]] = None,
param_formats: Optional[Sequence[int]] = None,
result_format: int = Format.TEXT,
) -> None:
args = self._query_params_args(
command, param_values, param_types, param_formats, result_format
)
self._ensure_pgconn()
if not impl.PQsendQueryParams(*args):
raise e.OperationalError(
f"sending query and params failed: {error_message(self)}"
)
def send_prepare(
self,
name: bytes,
command: bytes,
param_types: Optional[Sequence[int]] = None,
) -> None:
atypes: Optional[Array[impl.Oid]]
if not param_types:
nparams = 0
atypes = None
else:
nparams = len(param_types)
atypes = (impl.Oid * nparams)(*param_types)
self._ensure_pgconn()
if not impl.PQsendPrepare(
self._pgconn_ptr, name, command, nparams, atypes
):
raise e.OperationalError(
f"sending query and params failed: {error_message(self)}"
)
def send_query_prepared(
self,
name: bytes,
param_values: Optional[Sequence[Optional[bytes]]],
param_formats: Optional[Sequence[int]] = None,
result_format: int = Format.TEXT,
) -> None:
        # Reuse _query_params_args() by passing the statement name in place of
        # the query, then drop the param_types entry from the resulting tuple.
args = self._query_params_args(
name, param_values, None, param_formats, result_format
)
args = args[:3] + args[4:]
self._ensure_pgconn()
if not impl.PQsendQueryPrepared(*args):
raise e.OperationalError(
f"sending prepared query failed: {error_message(self)}"
)
def _query_params_args(
self,
command: bytes,
param_values: Optional[Sequence[Optional[bytes]]],
param_types: Optional[Sequence[int]] = None,
param_formats: Optional[Sequence[int]] = None,
result_format: int = Format.TEXT,
) -> Any:
if not isinstance(command, bytes):
raise TypeError(f"bytes expected, got {type(command)} instead")
aparams: Optional[Array[c_char_p]]
alenghts: Optional[Array[c_int]]
if param_values:
nparams = len(param_values)
aparams = (c_char_p * nparams)(
*(
# convert bytearray/memoryview to bytes
# TODO: avoid copy, at least in the C implementation.
b
if b is None or isinstance(b, bytes)
else bytes(b) # type: ignore[arg-type]
for b in param_values
)
)
alenghts = (c_int * nparams)(
*(len(p) if p else 0 for p in param_values)
)
else:
nparams = 0
aparams = alenghts = None
atypes: Optional[Array[impl.Oid]]
if not param_types:
atypes = None
else:
if len(param_types) != nparams:
raise ValueError(
"got %d param_values but %d param_types"
% (nparams, len(param_types))
)
atypes = (impl.Oid * nparams)(*param_types)
if not param_formats:
aformats = None
else:
if len(param_formats) != nparams:
raise ValueError(
"got %d param_values but %d param_formats"
% (nparams, len(param_formats))
)
aformats = (c_int * nparams)(*param_formats)
return (
self._pgconn_ptr,
command,
nparams,
atypes,
aparams,
alenghts,
aformats,
result_format,
)
def prepare(
self,
name: bytes,
command: bytes,
param_types: Optional[Sequence[int]] = None,
) -> "PGresult":
if not isinstance(name, bytes):
raise TypeError(f"'name' must be bytes, got {type(name)} instead")
if not isinstance(command, bytes):
raise TypeError(
f"'command' must be bytes, got {type(command)} instead"
)
if not param_types:
nparams = 0
atypes = None
else:
nparams = len(param_types)
atypes = (impl.Oid * nparams)(*param_types)
self._ensure_pgconn()
rv = impl.PQprepare(self._pgconn_ptr, name, command, nparams, atypes)
if not rv:
raise MemoryError("couldn't allocate PGresult")
return PGresult(rv)
def exec_prepared(
self,
name: bytes,
param_values: Optional[Sequence[bytes]],
param_formats: Optional[Sequence[int]] = None,
result_format: int = 0,
) -> "PGresult":
if not isinstance(name, bytes):
raise TypeError(f"'name' must be bytes, got {type(name)} instead")
aparams: Optional[Array[c_char_p]]
alenghts: Optional[Array[c_int]]
if param_values:
nparams = len(param_values)
aparams = (c_char_p * nparams)(*param_values)
alenghts = (c_int * nparams)(
*(len(p) if p else 0 for p in param_values)
)
else:
nparams = 0
aparams = alenghts = None
if not param_formats:
aformats = None
else:
if len(param_formats) != nparams:
raise ValueError(
"got %d param_values but %d param_types"
% (nparams, len(param_formats))
)
aformats = (c_int * nparams)(*param_formats)
self._ensure_pgconn()
rv = impl.PQexecPrepared(
self._pgconn_ptr,
name,
nparams,
aparams,
alenghts,
aformats,
result_format,
)
if not rv:
raise MemoryError("couldn't allocate PGresult")
return PGresult(rv)
def describe_prepared(self, name: bytes) -> "PGresult":
if not isinstance(name, bytes):
raise TypeError(f"'name' must be bytes, got {type(name)} instead")
self._ensure_pgconn()
rv = impl.PQdescribePrepared(self._pgconn_ptr, name)
if not rv:
raise MemoryError("couldn't allocate PGresult")
return PGresult(rv)
def send_describe_prepared(self, name: bytes) -> None:
if not isinstance(name, bytes):
raise TypeError(f"bytes expected, got {type(name)} instead")
self._ensure_pgconn()
if not impl.PQsendDescribePrepared(self._pgconn_ptr, name):
raise e.OperationalError(
f"sending describe prepared failed: {error_message(self)}"
)
def describe_portal(self, name: bytes) -> "PGresult":
if not isinstance(name, bytes):
raise TypeError(f"'name' must be bytes, got {type(name)} instead")
self._ensure_pgconn()
rv = impl.PQdescribePortal(self._pgconn_ptr, name)
if not rv:
raise MemoryError("couldn't allocate PGresult")
return PGresult(rv)
def send_describe_portal(self, name: bytes) -> None:
if not isinstance(name, bytes):
raise TypeError(f"bytes expected, got {type(name)} instead")
self._ensure_pgconn()
if not impl.PQsendDescribePortal(self._pgconn_ptr, name):
raise e.OperationalError(
f"sending describe portal failed: {error_message(self)}"
)
def get_result(self) -> Optional["PGresult"]:
rv = impl.PQgetResult(self._pgconn_ptr)
return PGresult(rv) if rv else None
def consume_input(self) -> None:
if 1 != impl.PQconsumeInput(self._pgconn_ptr):
raise e.OperationalError(
f"consuming input failed: {error_message(self)}"
)
def is_busy(self) -> int:
return impl.PQisBusy(self._pgconn_ptr)
@property
def nonblocking(self) -> int:
return impl.PQisnonblocking(self._pgconn_ptr)
@nonblocking.setter
def nonblocking(self, arg: int) -> None:
if 0 > impl.PQsetnonblocking(self._pgconn_ptr, arg):
raise e.OperationalError(
f"setting nonblocking failed: {error_message(self)}"
)
def flush(self) -> int:
if not self._pgconn_ptr:
raise e.OperationalError(
"flushing failed: the connection is closed"
)
rv: int = impl.PQflush(self._pgconn_ptr)
if rv < 0:
raise e.OperationalError(f"flushing failed: {error_message(self)}")
return rv
def set_single_row_mode(self) -> None:
if not impl.PQsetSingleRowMode(self._pgconn_ptr):
raise e.OperationalError("setting single row mode failed")
def get_cancel(self) -> "PGcancel":
"""
Create an object with the information needed to cancel a command.
See :pq:`PQgetCancel` for details.
"""
rv = impl.PQgetCancel(self._pgconn_ptr)
if not rv:
raise e.OperationalError("couldn't create cancel object")
return PGcancel(rv)
def notifies(self) -> Optional[PGnotify]:
ptr = impl.PQnotifies(self._pgconn_ptr)
if ptr:
            c = ptr.contents
            n = PGnotify(c.relname, c.be_pid, c.extra)
            impl.PQfreemem(ptr)  # safe to free: PGnotify copied the struct fields
            return n
else:
return None
def put_copy_data(self, buffer: bytes) -> int:
# TODO: should be done without copy
if not isinstance(buffer, bytes):
buffer = bytes(buffer)
rv = impl.PQputCopyData(self._pgconn_ptr, buffer, len(buffer))
if rv < 0:
raise e.OperationalError(
f"sending copy data failed: {error_message(self)}"
)
return rv
def put_copy_end(self, error: Optional[bytes] = None) -> int:
rv = impl.PQputCopyEnd(self._pgconn_ptr, error)
if rv < 0:
raise e.OperationalError(
f"sending copy end failed: {error_message(self)}"
)
return rv
def get_copy_data(self, async_: int) -> Tuple[int, memoryview]:
buffer_ptr = c_char_p()
nbytes = impl.PQgetCopyData(
self._pgconn_ptr, byref(buffer_ptr), async_
)
if nbytes == -2:
raise e.OperationalError(
f"receiving copy data failed: {error_message(self)}"
)
if buffer_ptr:
# TODO: do it without copy
data = string_at(buffer_ptr, nbytes)
impl.PQfreemem(buffer_ptr)
return nbytes, memoryview(data)
else:
return nbytes, memoryview(b"")
def encrypt_password(
self, passwd: bytes, user: bytes, algorithm: Optional[bytes] = None
) -> bytes:
out = impl.PQencryptPasswordConn(
self._pgconn_ptr, passwd, user, algorithm
)
if not out:
raise e.OperationalError(
f"password encryption failed: {error_message(self)}"
)
rv = string_at(out)
impl.PQfreemem(out)
return rv
def make_empty_result(self, exec_status: int) -> "PGresult":
rv = impl.PQmakeEmptyPGresult(self._pgconn_ptr, exec_status)
if not rv:
raise MemoryError("couldn't allocate empty PGresult")
return PGresult(rv)
@property
def pipeline_status(self) -> int:
if version() < 140000:
return 0
return impl.PQpipelineStatus(self._pgconn_ptr)
def enter_pipeline_mode(self) -> None:
"""Enter pipeline mode.
:raises ~e.OperationalError: in case of failure to enter the pipeline
mode.
"""
if impl.PQenterPipelineMode(self._pgconn_ptr) != 1:
raise e.OperationalError("failed to enter pipeline mode")
def exit_pipeline_mode(self) -> None:
"""Exit pipeline mode.
:raises ~e.OperationalError: in case of failure to exit the pipeline
mode.
"""
if impl.PQexitPipelineMode(self._pgconn_ptr) != 1:
raise e.OperationalError(error_message(self))
def pipeline_sync(self) -> None:
"""Mark a synchronization point in a pipeline.
:raises ~e.OperationalError: if the connection is not in pipeline mode
or if sync failed.
"""
rv = impl.PQpipelineSync(self._pgconn_ptr)
if rv == 0:
raise e.OperationalError("connection not in pipeline mode")
if rv != 1:
raise e.OperationalError("failed to sync pipeline")
def send_flush_request(self) -> None:
"""Sends a request for the server to flush its output buffer.
:raises ~e.OperationalError: if the flush request failed.
"""
if impl.PQsendFlushRequest(self._pgconn_ptr) == 0:
raise e.OperationalError(
f"flush request failed: {error_message(self)}"
)
def _call_bytes(
self, func: Callable[[impl.PGconn_struct], Optional[bytes]]
) -> bytes:
"""
Call one of the pgconn libpq functions returning a bytes pointer.
"""
if not self._pgconn_ptr:
raise e.OperationalError("the connection is closed")
rv = func(self._pgconn_ptr)
assert rv is not None
return rv
def _call_int(self, func: Callable[[impl.PGconn_struct], int]) -> int:
"""
Call one of the pgconn libpq functions returning an int.
"""
if not self._pgconn_ptr:
raise e.OperationalError("the connection is closed")
return func(self._pgconn_ptr)
def _call_bool(self, func: Callable[[impl.PGconn_struct], int]) -> bool:
"""
Call one of the pgconn libpq functions returning a logical value.
"""
if not self._pgconn_ptr:
raise e.OperationalError("the connection is closed")
return bool(func(self._pgconn_ptr))
def _ensure_pgconn(self) -> None:
if not self._pgconn_ptr:
raise e.OperationalError("the connection is closed")
class PGresult:
"""
Python representation of a libpq result.
"""
__slots__ = ("_pgresult_ptr",)
def __init__(self, pgresult_ptr: impl.PGresult_struct):
self._pgresult_ptr: Optional[impl.PGresult_struct] = pgresult_ptr
def __del__(self) -> None:
self.clear()
def __repr__(self) -> str:
cls = f"{self.__class__.__module__}.{self.__class__.__qualname__}"
status = ExecStatus(self.status)
return f"<{cls} [{status.name}] at 0x{id(self):x}>"
def clear(self) -> None:
self._pgresult_ptr, p = None, self._pgresult_ptr
if p:
PQclear(p)
@property
def pgresult_ptr(self) -> Optional[int]:
"""The pointer to the underlying ``PGresult`` structure, as integer.
`!None` if the result was cleared.
The value can be used to pass the structure to libpq functions which
psycopg doesn't (currently) wrap, either in C or in Python using FFI
libraries such as `ctypes`.
"""
if self._pgresult_ptr is None:
return None
return addressof(self._pgresult_ptr.contents) # type: ignore[attr-defined]
@property
def status(self) -> int:
return impl.PQresultStatus(self._pgresult_ptr)
@property
def error_message(self) -> bytes:
return impl.PQresultErrorMessage(self._pgresult_ptr)
def error_field(self, fieldcode: int) -> Optional[bytes]:
return impl.PQresultErrorField(self._pgresult_ptr, fieldcode)
@property
def ntuples(self) -> int:
return impl.PQntuples(self._pgresult_ptr)
@property
def nfields(self) -> int:
return impl.PQnfields(self._pgresult_ptr)
def fname(self, column_number: int) -> Optional[bytes]:
return impl.PQfname(self._pgresult_ptr, column_number)
def ftable(self, column_number: int) -> int:
return impl.PQftable(self._pgresult_ptr, column_number)
def ftablecol(self, column_number: int) -> int:
return impl.PQftablecol(self._pgresult_ptr, column_number)
def fformat(self, column_number: int) -> int:
return impl.PQfformat(self._pgresult_ptr, column_number)
def ftype(self, column_number: int) -> int:
return impl.PQftype(self._pgresult_ptr, column_number)
def fmod(self, column_number: int) -> int:
return impl.PQfmod(self._pgresult_ptr, column_number)
def fsize(self, column_number: int) -> int:
return impl.PQfsize(self._pgresult_ptr, column_number)
@property
def binary_tuples(self) -> int:
return impl.PQbinaryTuples(self._pgresult_ptr)
def get_value(
self, row_number: int, column_number: int
) -> Optional[bytes]:
length: int = impl.PQgetlength(
self._pgresult_ptr, row_number, column_number
)
if length:
v = impl.PQgetvalue(self._pgresult_ptr, row_number, column_number)
return string_at(v, length)
else:
if impl.PQgetisnull(self._pgresult_ptr, row_number, column_number):
return None
else:
return b""
@property
def nparams(self) -> int:
return impl.PQnparams(self._pgresult_ptr)
def param_type(self, param_number: int) -> int:
return impl.PQparamtype(self._pgresult_ptr, param_number)
@property
def command_status(self) -> Optional[bytes]:
return impl.PQcmdStatus(self._pgresult_ptr)
@property
def command_tuples(self) -> Optional[int]:
rv = impl.PQcmdTuples(self._pgresult_ptr)
return int(rv) if rv else None
@property
def oid_value(self) -> int:
return impl.PQoidValue(self._pgresult_ptr)
def set_attributes(self, descriptions: List[PGresAttDesc]) -> None:
structs = [
impl.PGresAttDesc_struct(*desc) # type: ignore
for desc in descriptions
]
array = (impl.PGresAttDesc_struct * len(structs))(*structs) # type: ignore
rv = impl.PQsetResultAttrs(self._pgresult_ptr, len(structs), array)
if rv == 0:
raise e.OperationalError("PQsetResultAttrs failed")
class PGcancel:
"""
Token to cancel the current operation on a connection.
Created by `PGconn.get_cancel()`.
"""
__slots__ = ("pgcancel_ptr",)
def __init__(self, pgcancel_ptr: impl.PGcancel_struct):
self.pgcancel_ptr: Optional[impl.PGcancel_struct] = pgcancel_ptr
def __del__(self) -> None:
self.free()
def free(self) -> None:
"""
Free the data structure created by :pq:`PQgetCancel()`.
Automatically invoked by `!__del__()`.
See :pq:`PQfreeCancel()` for details.
"""
self.pgcancel_ptr, p = None, self.pgcancel_ptr
if p:
PQfreeCancel(p)
def cancel(self) -> None:
"""Requests that the server abandon processing of the current command.
See :pq:`PQcancel()` for details.
"""
buf = create_string_buffer(256)
res = impl.PQcancel(
self.pgcancel_ptr, pointer(buf), len(buf) # type: ignore
)
if not res:
raise e.OperationalError(
f"cancel failed: {buf.value.decode('utf8', 'ignore')}"
)
class Conninfo:
"""
Utility object to manipulate connection strings.
"""
@classmethod
def get_defaults(cls) -> List[ConninfoOption]:
opts = impl.PQconndefaults()
if not opts:
raise MemoryError("couldn't allocate connection defaults")
try:
return cls._options_from_array(opts)
finally:
impl.PQconninfoFree(opts)
@classmethod
def parse(cls, conninfo: bytes) -> List[ConninfoOption]:
if not isinstance(conninfo, bytes):
raise TypeError(f"bytes expected, got {type(conninfo)} instead")
errmsg = c_char_p()
rv = impl.PQconninfoParse(conninfo, pointer(errmsg))
if not rv:
if not errmsg:
raise MemoryError("couldn't allocate on conninfo parse")
else:
exc = e.OperationalError(
(errmsg.value or b"").decode("utf8", "replace")
)
impl.PQfreemem(errmsg)
raise exc
try:
return cls._options_from_array(rv)
finally:
impl.PQconninfoFree(rv)
@classmethod
def _options_from_array(
cls, opts: Sequence[impl.PQconninfoOption_struct]
) -> List[ConninfoOption]:
rv = []
skws = "keyword envvar compiled val label dispchar".split()
for opt in opts:
if not opt.keyword:
break
d = {kw: getattr(opt, kw) for kw in skws}
d["dispsize"] = opt.dispsize
rv.append(ConninfoOption(**d))
return rv
class Escaping:
"""
Utility object to escape strings for SQL interpolation.
"""
def __init__(self, conn: Optional[PGconn] = None):
self.conn = conn
def escape_literal(self, data: "abc.Buffer") -> memoryview:
if not self.conn:
raise e.OperationalError(
"escape_literal failed: no connection provided"
)
self.conn._ensure_pgconn()
# TODO: might be done without copy (however C does that)
if not isinstance(data, bytes):
data = bytes(data)
out = impl.PQescapeLiteral(self.conn._pgconn_ptr, data, len(data))
if not out:
raise e.OperationalError(
f"escape_literal failed: {error_message(self.conn)} bytes"
)
rv = string_at(out)
impl.PQfreemem(out)
return memoryview(rv)
def escape_identifier(self, data: "abc.Buffer") -> memoryview:
if not self.conn:
raise e.OperationalError(
"escape_identifier failed: no connection provided"
)
self.conn._ensure_pgconn()
if not isinstance(data, bytes):
data = bytes(data)
out = impl.PQescapeIdentifier(self.conn._pgconn_ptr, data, len(data))
if not out:
raise e.OperationalError(
f"escape_identifier failed: {error_message(self.conn)} bytes"
)
rv = string_at(out)
impl.PQfreemem(out)
return memoryview(rv)
def escape_string(self, data: "abc.Buffer") -> memoryview:
if not isinstance(data, bytes):
data = bytes(data)
if self.conn:
self.conn._ensure_pgconn()
error = c_int()
out = create_string_buffer(len(data) * 2 + 1)
impl.PQescapeStringConn(
self.conn._pgconn_ptr,
pointer(out), # type: ignore
data,
len(data),
pointer(error),
)
if error:
raise e.OperationalError(
f"escape_string failed: {error_message(self.conn)} bytes"
)
else:
out = create_string_buffer(len(data) * 2 + 1)
impl.PQescapeString(
pointer(out), # type: ignore
data,
len(data),
)
return memoryview(out.value)
def escape_bytea(self, data: "abc.Buffer") -> memoryview:
len_out = c_size_t()
# TODO: might be able to do without a copy but it's a mess.
# the C library does it better anyway, so maybe not worth optimising
# https://mail.python.org/pipermail/python-dev/2012-September/121780.html
if not isinstance(data, bytes):
data = bytes(data)
if self.conn:
self.conn._ensure_pgconn()
out = impl.PQescapeByteaConn(
self.conn._pgconn_ptr,
data,
len(data),
pointer(t_cast(c_ulong, len_out)),
)
else:
out = impl.PQescapeBytea(
data, len(data), pointer(t_cast(c_ulong, len_out))
)
if not out:
raise MemoryError(
f"couldn't allocate for escape_bytea of {len(data)} bytes"
)
rv = string_at(out, len_out.value - 1) # out includes final 0
impl.PQfreemem(out)
return memoryview(rv)
def unescape_bytea(self, data: bytes) -> memoryview:
# not needed, but let's keep it symmetric with the escaping:
# if a connection is passed in, it must be valid.
if self.conn:
self.conn._ensure_pgconn()
len_out = c_size_t()
out = impl.PQunescapeBytea(data, pointer(t_cast(c_ulong, len_out)))
if not out:
raise MemoryError(
f"couldn't allocate for unescape_bytea of {len(data)} bytes"
)
rv = string_at(out, len_out.value)
impl.PQfreemem(out)
return memoryview(rv)
# importing the ssl module sets up Python's libcrypto callbacks
import ssl # noqa
# disable libcrypto setup in libpq, so it won't stomp on the callbacks
# that have already been set up
impl.PQinitOpenSSL(1, 0)
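# --- Usage sketch (illustrative, not part of the original module) ----------
# A minimal example of driving the wrapper directly rather than through the
# `pq` module recommended above. The DSN is an assumption and must point to a
# reachable server for the query to succeed.
if __name__ == "__main__":
    _conn = PGconn.connect(b"dbname=test")
    _res = _conn.exec_(b"SELECT 1")
    if _res.status == ExecStatus.TUPLES_OK:
        # one row, one column, returned as raw bytes (b"1")
        print(_res.ntuples, _res.nfields, _res.get_value(0, 0))
    _res.clear()
    _conn.finish()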
| 1.710938
| 2
|
test/unit/test_first_conditional_stop.py
|
KTH/aspen
| 0
|
12782307
|
<filename>test/unit/test_first_conditional_stop.py
__author__ = '<EMAIL>'
import unittest
import mock
from test import mock_test_data # pylint: disable=C0411
from modules.steps.first_conditional_stop import FirstConditionalStop
from modules.util import data_defs, cache_defs
class TestFirstConditionalStop(unittest.TestCase):
def test_service_uses_semver(self):
pipeline_data = mock_test_data.get_pipeline_data()
step = FirstConditionalStop()
result = step.service_uses_semver(pipeline_data)
self.assertTrue(result)
pipeline_data[data_defs.SERVICES][1][data_defs.S_IMAGE][data_defs.IMG_IS_SEMVER] = False
result = step.service_uses_semver(pipeline_data)
self.assertFalse(result)
def test_caches_are_equal(self):
pipeline_data = {data_defs.STACK_FILE_DIR_HASH: 'abc123'}
pipeline_data[data_defs.CACHE_ENTRY] = None
step = FirstConditionalStop()
result = step.caches_are_equal(pipeline_data)
self.assertFalse(result)
pipeline_data[data_defs.CACHE_ENTRY] = {cache_defs.DIRECTORY_MD5: '123abc'}
result = step.caches_are_equal(pipeline_data)
self.assertFalse(result)
pipeline_data[data_defs.CACHE_ENTRY] = {cache_defs.DIRECTORY_MD5: 'abc123'}
result = step.caches_are_equal(pipeline_data)
self.assertTrue(result)
def test_run_step(self):
pipeline_data = mock_test_data.get_pipeline_data()
pipeline_data[data_defs.CACHE_ENTRY] = None
step = FirstConditionalStop()
step.stop_pipeline = mock.Mock()
# semver usage + changed hash: no stop
step.run_step(pipeline_data)
step.stop_pipeline.assert_not_called()
pipeline_data[data_defs.CACHE_ENTRY] = {cache_defs.DIRECTORY_MD5: 'alejfbabovudbasepvbsoev'}
step.stop_pipeline.reset_mock()
# semver usage + equal hash: no stop
step.run_step(pipeline_data)
step.stop_pipeline.assert_not_called()
pipeline_data[data_defs.SERVICES][1][data_defs.S_IMAGE][data_defs.IMG_IS_SEMVER] = False
step.stop_pipeline.reset_mock()
# no semver usage + equal hash: stop
step.run_step(pipeline_data)
step.stop_pipeline.assert_called_once()
pipeline_data[data_defs.STACK_FILE_DIR_HASH] = 'not_equal'
step.stop_pipeline.reset_mock()
# no semver usage + changed hash: no stop
step.run_step(pipeline_data)
step.stop_pipeline.assert_not_called()
| 2.09375
| 2
|
sppas/sppas/src/annotations/TextNorm/num2text/num_base.py
|
mirfan899/MTTS
| 0
|
12782308
|
<gh_stars>0
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
"""
from sppas import sppasValueError, sppasTypeError, sppasDictRepl
# ---------------------------------------------------------------------------
class sppasNumBase(object):
ASIAN_TYPED_LANGUAGES = ("yue", "cmn", "jpn", "pcm")
EUROPEAN_TYPED_LANGUAGES = ("fra", "ita", "eng", "spa", "pol", "por", "vie", "khm")
# ---------------------------------------------------------------------------
def __init__(self, lang=None, dictionary=None):
"""Create an instance of sppasNumBase.
:param lang: (str) name of the language
:raises: (sppasValueError)
"""
self.languages = ("und", "yue", "cmn", "fra", "ita", "eng", "spa",
"khm", "vie", "jpn", "pol", "por", "pcm")
self.separator = '_'
if lang is None or lang not in self.languages:
self.__lang = "und"
else:
self.__lang = lang
if dictionary is None or isinstance(dictionary, sppasDictRepl) is False:
raise sppasTypeError(dictionary, "sppasDictRepl")
self._lang_dict = sppasDictRepl()
if self.__lang is not "und" and dictionary is not None:
has_tenth_of_thousand = False
lang_except = ('vie', 'khm')
if dictionary.is_key('10000') and lang not in lang_except:
has_tenth_of_thousand = True
if has_tenth_of_thousand is True\
and self.__lang not in sppasNumBase.ASIAN_TYPED_LANGUAGES:
raise sppasValueError(dictionary, str(sppasNumBase.ASIAN_TYPED_LANGUAGES))
elif has_tenth_of_thousand is False\
and self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
raise sppasValueError(dictionary, str(sppasNumBase.EUROPEAN_TYPED_LANGUAGES))
self._lang_dict = dictionary
# ---------------------------------------------------------------------------
def get_lang(self):
"""Return the current language.
:returns: (str)
"""
return self.__lang
# ---------------------------------------------------------------------------
def set_lang(self, lang):
"""Set the language to a new one and update the dictionary.
:param lang: (str) new language
:raises: sppasValueError
"""
if lang in self.languages:
self.__lang = lang
self._lang_dict = sppasDictRepl(self.__lang)
else:
raise sppasValueError(lang, str(self.languages))
# ---------------------------------------------------------------------------
def _get_lang_dict(self):
"""Return the current language dictionary.
:returns: (list) current language dictionary
"""
return self._lang_dict
# ---------------------------------------------------------------------------
def _units(self, number):
"""Return the "wordified" version of a unit number.
Returns the word corresponding to the given unit within the current
language dictionary
:param number: (int) number to convert in word
:returns: (str)
"""
if number == 0:
return self._lang_dict['0']
if 0 < number < 10:
return self._lang_dict[str(number)]
# ---------------------------------------------------------------------------
def _tenth(self, number):
"""Return the "wordified" version of a tenth number.
Returns the word corresponding to the given tenth within the current
language dictionary
:param number: (int) number to convert in word
:returns: (str)
"""
if number < 10:
return self._units(number)
else:
if self._lang_dict.is_key(number):
return self._lang_dict[str(number)]
else:
if self._lang_dict.is_key(int(number/10)*10):
if int(str(number)[1:]) == 0:
return self._lang_dict[str(number)]
else:
if self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
return self._lang_dict[str(int(number/10)*10)] \
+ self._units(number % 10)
else:
return self._lang_dict[str(int(number/10)*10)] \
+ self.separator \
+ self._units(number % 10)
# ---------------------------------------------------------------------------
def _hundreds(self, number):
"""Return the "wordified" version of a hundred number.
Returns the word corresponding to the given hundred number within the
current language dictionary
:param number: (int) number to convert in word
:returns: (str)
"""
if number < 100:
return self._tenth(number)
else:
mult = None
if int(str(number)[0])*100 != 100:
mult = self._units(int(number/100))
if mult is None:
if int(str(number)[1:]) == 0:
return self._lang_dict['100']
else:
if self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
return self._lang_dict['100'] \
+ self._tenth(number % 100)
else:
return self._lang_dict['100']\
+ self.separator \
+ self._tenth(number % 100)
else:
if int(str(number)[1:]) == 0:
if self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
return mult + self._lang_dict['100']\
+ self._tenth(number % 100)
else:
return mult + self.separator\
+ self._lang_dict['100']
else:
if self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
return mult + self._lang_dict['100']\
+ self._tenth(number % 100)
else:
return mult + self.separator\
+ self._lang_dict['100']\
+ self.separator\
+ self._tenth(number % 100)
# ---------------------------------------------------------------------------
def _thousands(self, number):
"""Return the "wordified" version of a thousand number.
Returns the word corresponding to the given thousand number within the
current language dictionary
:param number: (int) number to convert in word
:returns: (str)
"""
if number < 1000:
return self._hundreds(number)
else:
mult = None
if number/1000*1000 != 1000:
mult = self._hundreds(int(number/1000))
if mult is None:
if int(str(number)[1:]) == 0:
if self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
return self._lang_dict['1']\
+ self._lang_dict['1000']
else:
return self._lang_dict['1']\
+ self.separator\
+ self._lang_dict['1000']
else:
if self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
return self._lang_dict['1']\
+ self._lang_dict['1000'] \
+ self._hundreds(number % 1000)
else:
return self._lang_dict['1000'] \
+ self.separator\
+ self._hundreds(number % 1000)
else:
if int(str(number)[1:]) == 0:
if self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
return mult + self._lang_dict['1000'] \
+ self._hundreds(number % 1000)
else:
return mult + self.separator\
+ self._lang_dict['1000']
else:
if self.__lang in sppasNumBase.ASIAN_TYPED_LANGUAGES:
return mult + self._lang_dict['1000'] \
+ self._hundreds(number % 1000)
else:
return mult + self.separator\
+ self._lang_dict['1000'] \
+ self.separator\
+ self._hundreds(number % 1000)
# ---------------------------------------------------------------------------
def _billions(self, number):
"""Return the "wordified" version of a billion number
Returns the word corresponding to the given billion number within the
current language dictionary
:param number: (int) number to convert in word
:returns: (str)
"""
raise NotImplementedError
# ---------------------------------------------------------------------------
def convert(self, number):
"""Return the whole "wordified" given number.
Returns the entire number given in parameter in a "wordified" state
it calls recursively the sub functions within the instance and more
specifics ones in the sub-classes
:param number: (int) number to convert into word
:returns: (str)
"""
stringyfied_number = str(number)
if stringyfied_number.isdigit() is False:
raise sppasValueError(number, "int")
res = ''
if len(stringyfied_number) > 1:
if stringyfied_number.startswith('0'):
while '0' == stringyfied_number[0]:
res += self._lang_dict['0'] + self.separator
stringyfied_number = stringyfied_number[1:]
res += self._billions(int(number))
return res if res is not None else number
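# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical dictionary content): for a European-typed
# language whose replacement dictionary maps '2' -> "two", '40' -> "forty" and
# '100' -> "hundred", _hundreds(142) composes roughly "hundred_forty_two",
# joining the parts with the separator. convert() itself delegates to
# _billions(), which the language-specific subclasses must implement.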
| 1.523438
| 2
|
scripts/general_analysis/aom_response.py
|
charlesblakemore/opt_lev_analysis
| 0
|
12782309
|
import os, fnmatch, sys, time
import dill as pickle
import scipy.interpolate as interp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import bead_util as bu
import calib_util as cu
import configuration as config
import time
dirname = '/data/old_trap/20201202/power/init'
files, _ = bu.find_all_fnames(dirname, sort_time=True)
fb_set = []
power = []
for filename in files:
    df = bu.DataFile()
    df.load(filename)
fb_set.append(np.mean(df.pos_fb[2]))
power.append(np.abs(np.mean(df.power)))
plt.plot(fb_set, power)
plt.show()
| 1.695313
| 2
|
pythoncev/exercicios/ex044.py
|
gustavobelloni/Python
| 0
|
12782310
|
preço = float(input('Preço das compras: R$'))
print('''FORMAS DE PAGAMENTO
[ 1 ] à vista em dinheiro/cheque
[ 2 ] à vista no cartão
[ 3 ] 2x no cartão
[ 4 ] 3x ou mais no cartão''')
opção = int(input('Qual é a opção? '))
if opção == 1:
desc10 = preço - (preço * 10 / 100)
print(f'A sua compra de R${preço:.2f}, com desconto de 10%, vai custar R${desc10:.2f} no final')
elif opção == 2:
desc5 = preço - (preço * 5 / 100)
print(f'A sua compra de R${preço:.2f}, com desconto de 5%, vai custar R${desc5:.2f} no final')
elif opção == 3:
x2 = preço / 2
print(f'''A sua compra será parcelada em 2x de R${x2:.2f} SEM JUROS
Sua compra de R${preço:.2f} vai custar R${preço:.2f} no final.''')
elif opção == 4:
juros20 = preço + (preço * 20 / 100)
parcelas = int(input('Quantas parcelas? '))
print(f'''Sua compra está parcelada em {parcelas}x de R${juros20 / parcelas:.2f} COM JUROS
sua compra de R${preço:.2f} vai custar R${juros20:.2f} no final.''')
else:
print('Opção inválida!')
| 3.828125
| 4
|
tests/test_requirements.py
|
kkoralsky/pex
| 4
|
12782311
|
<reponame>kkoralsky/pex
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from textwrap import dedent
import pytest
from pkg_resources import Requirement
from twitter.common.contextutil import temporary_dir
from pex.requirements import requirements_from_file, requirements_from_lines
from pex.resolvable import ResolvableRequirement
from pex.resolver_options import ResolverOptionsBuilder
def test_from_empty_lines():
reqs = requirements_from_lines([])
assert len(reqs) == 0
reqs = requirements_from_lines(dedent("""
# comment
""").splitlines())
assert len(reqs) == 0
@pytest.mark.parametrize('flag_separator', (' ', '='))
def test_line_types(flag_separator):
reqs = requirements_from_lines(dedent("""
simple_requirement
specific_requirement==2
--allow-external%sspecific_requirement
""" % flag_separator).splitlines())
# simple_requirement
assert len(reqs) == 2
assert isinstance(reqs[0], ResolvableRequirement)
assert reqs[0].requirement == Requirement.parse('simple_requirement')
assert not reqs[0].options._allow_external
# specific_requirement
assert isinstance(reqs[1], ResolvableRequirement)
assert reqs[1].requirement == Requirement.parse('specific_requirement==2')
assert reqs[1].options._allow_external
def test_all_external():
reqs = requirements_from_lines(dedent("""
simple_requirement
specific_requirement==2
--allow-all-external
""").splitlines())
assert reqs[0].options._allow_external
assert reqs[1].options._allow_external
def test_allow_prereleases():
# Prereleases should be disallowed by default.
reqs = requirements_from_lines(dedent("""
simple_requirement
specific_requirement==2
""").splitlines())
assert not reqs[0].options._allow_prereleases
assert not reqs[1].options._allow_prereleases
reqs = requirements_from_lines(dedent("""
--pre
simple_requirement
specific_requirement==2
""").splitlines())
assert reqs[0].options._allow_prereleases
assert reqs[1].options._allow_prereleases
def test_index_types():
reqs = requirements_from_lines(dedent("""
simple_requirement
--no-index
""").splitlines())
assert reqs[0].options._fetchers == []
for prefix in ('-f ', '--find-links ', '--find-links='):
reqs = requirements_from_lines(dedent("""
foo
--no-index
%shttps://example.com/repo
""" % prefix).splitlines())
assert len(reqs[0].options._fetchers) == 1
assert reqs[0].options._fetchers[0].urls('foo') == ['https://example.com/repo']
for prefix in ('-i ', '--index-url ', '--index-url=', '--extra-index-url ', '--extra-index-url='):
reqs = requirements_from_lines(dedent("""
foo
--no-index
%shttps://example.com/repo/
""" % prefix).splitlines())
assert len(reqs[0].options._fetchers) == 1, 'Prefix is: %r' % prefix
assert reqs[0].options._fetchers[0].urls('foo') == ['https://example.com/repo/foo/']
def test_nested_requirements():
with temporary_dir() as td1:
with temporary_dir() as td2:
with open(os.path.join(td1, 'requirements.txt'), 'w') as fp:
fp.write(dedent('''
requirement1
requirement2
-r %s
-r %s
''' % (
os.path.join(td2, 'requirements_nonrelative.txt'),
os.path.join('relative', 'requirements_relative.txt'))
))
with open(os.path.join(td2, 'requirements_nonrelative.txt'), 'w') as fp:
fp.write(dedent('''
requirement3
requirement4
'''))
os.mkdir(os.path.join(td1, 'relative'))
with open(os.path.join(td1, 'relative', 'requirements_relative.txt'), 'w') as fp:
fp.write(dedent('''
requirement5
requirement6
'''))
def rr(req):
return ResolvableRequirement.from_string(req, ResolverOptionsBuilder())
reqs = requirements_from_file(os.path.join(td1, 'requirements.txt'))
assert reqs == [rr('requirement%d' % k) for k in (1, 2, 3, 4, 5, 6)]
| 2.140625
| 2
|
src/python/tools/tool1.py
|
tuh8888/hpl-util
| 0
|
12782312
|
<reponame>tuh8888/hpl-util
bool x(int a, int b)
{
}
bool y(int a, int b)
{
}
bool z(int c)
{
}
| 1.101563
| 1
|
codenames/models/yolov2/__init__.py
|
vladimir-tikhonov/codenames_ai
| 0
|
12782313
|
<filename>codenames/models/yolov2/__init__.py
from .yolov2 import YoloV2
__all__ = [
'YoloV2'
]
| 1.195313
| 1
|
Carletproject/Carletproject/urls.py
|
shahparkhan/CarLet
| 0
|
12782314
|
"""Carletproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Carletapp import views
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('signup1/', csrf_exempt(views.SignUp1.as_view())),
path('signup2/', csrf_exempt(views.SignUp2.as_view())),
path('login/', csrf_exempt(views.Login.as_view())),
path('uservalidation/', csrf_exempt(views.UserRegistrationValidation.as_view())),
path('userregister/', csrf_exempt(views.UserRegistration.as_view())),
path('forgotpassword/', csrf_exempt(views.ForgetPassword.as_view())),
path('changepassword/', csrf_exempt(views.ChangePassword.as_view())),
path('checkverification/', csrf_exempt(views.CheckVerification.as_view())),
path('checkregistration/', csrf_exempt(views.CheckRegistration.as_view())),
path('searchvehicle/', csrf_exempt(views.SearchVehicle.as_view())),
path('registervehicle/', csrf_exempt(views.VehicleRegistration.as_view())),
path('licensevalidation/', csrf_exempt(views.VehicleDetailValidation.as_view())),
path('requestvehicle/', csrf_exempt(views.RequestVehicle.as_view())),
path('approverequest/', csrf_exempt(views.ApproveRequest.as_view())),
path('ratevehicle/', csrf_exempt(views.RaterReviewVehicle.as_view())),
path('raterenter/', csrf_exempt(views.RateReviewRenter.as_view())),
path('sentrentrequest/', csrf_exempt(views.SentRentRequest.as_view())),
path('rcvrentrequest/', csrf_exempt(views.RecvRentRequest.as_view())),
path('generatereceipt/', csrf_exempt(views.GenerateReceipt.as_view())),
path('uploadreceipt/', csrf_exempt(views.UploadReceipt.as_view())),
path('getprofileinfo/', csrf_exempt(views.GetProfileInfo.as_view())),
path('payment/', csrf_exempt(views.Payment.as_view())),
path('accountsetting/<str:pk>/', csrf_exempt(views.ProfileAccountSetting.as_view())),
path('uservehicle/<str:pk>/', csrf_exempt(views.UserVehicleList.as_view())),
path('vehiclesetting/<str:pk>/', csrf_exempt(views.VehicleSetting.as_view())),
path('triphistory/<str:pk>/', csrf_exempt(views.TripHistory.as_view())),
path('profilepic/<str:pk>/', csrf_exempt(views.RetreiveProfilePicture.as_view())),
path('vehiclepictures/<str:pk>/', csrf_exempt(views.DisplayVehiclePictures.as_view())),
path('redeemamount/<str:pk>/', csrf_exempt(views.RedeemAmount.as_view())),
path('removefromrent/<str:pk>/', csrf_exempt(views.RemoveVehicleForRent.as_view())),
path('updateprofilepic/<str:pk>/', csrf_exempt(views.UpdateProfilePicture.as_view())),
path('addfav/', csrf_exempt(views.AddFavorite.as_view())),
path('removefav/<str:pk>/', csrf_exempt(views.RemoveFavorite.as_view())),
path('displayfav/<str:pk>/', csrf_exempt(views.FavoriteList.as_view())),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 2.375
| 2
|
p3.py
|
aleksanderhan/ProjectEuler
| 0
|
12782315
|
<filename>p3.py
from is_prime import is_prime
def largest_prime_factor(n):
i = 2
while i != n:
if is_prime(i) and n%i == 0:
n = int(n/i)
else:
i += 1
return(i)
print(largest_prime_factor(600851475143))
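# Worked example (from the Project Euler statement): the prime factors of 13195
# are 5, 7, 13 and 29, so largest_prime_factor(13195) == 29. The loop divides
# out each prime factor in increasing order until only the largest remains.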
| 3.640625
| 4
|
Python/PythonIfElse.py
|
chicio/Hackerrank
| 6
|
12782316
|
<reponame>chicio/Hackerrank
#
# PythonIfElse.py
# HackerRank
#
# Created by <NAME> on 14/10/17.
#
# https://www.hackerrank.com/challenges/py-if-else
n = int(raw_input())
if n % 2 != 0:
print "Weird"
else:
if 2 <= n <= 5:
print "Not Weird"
if 6 <= n <= 20:
print "Weird"
if n > 20:
print "Not Weird"
| 3.3125
| 3
|
webapp/home/migrations/0008_alter_notice_enabled.py
|
usegalaxy-au/galaxy-media-site
| 0
|
12782317
|
# Generated by Django 3.2 on 2021-12-02 01:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0007_alter_user_email'),
]
operations = [
migrations.AlterField(
model_name='notice',
name='enabled',
field=models.BooleanField(default=False, help_text='Display on the Galaxy Australia landing page.'),
),
]
| 1.5625
| 2
|
auto_typing_game.py
|
Plummy-Panda/python-magictype
| 0
|
12782318
|
import socket
import re
import config
def get_word(data):
word = None
word_regexp = re.compile(r'[^Score:\s\d{1,}]([a-zA-Z0-9]+)')
found = word_regexp.search(data)
if found:
word = found.group(1)
else:
pass
return word
def get_score(data):
score = None
score_regexp = re.compile(r'Score:\s(\d{1,})')
found = score_regexp.search(data)
if found:
score = int(found.group(1))
else:
pass
return score
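# Illustrative example: get_score("Score: 120") returns 120; when the pattern
# is not present the function returns None.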
def main():
playing = True
is_game_over = False
lastScore = 0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (config.HOST, config.PORT)
print 'connecting to %s port %s' % server_address
sock.connect(server_address)
while True:
data = sock.recv(1024)
if '=====Magic Type Menu=====' in data and playing:
print "[*] Play a game!"
sock.sendall('1\r\n')
if 'Choose the speed level' in data and playing:
print "[*] Choose speed level at " + str(config.LEVEL) + '!'
sock.sendall(str(config.LEVEL) + '\r\n')
if 'Game over' in data:
print '[*] Game over!'
is_game_over = True
if '|' in data and playing:
score = get_score(data)
word = get_word(data)
if score is not None:
if score >= config.STOP_THRESHOLD_SCORE:
playing = False
else:
if lastScore != score:
print 'Score:', score
lastScore = score
if word is not None:
print 'Found word: ', word
sock.sendall(word + '\r\n')
if is_game_over:
data = sock.recv(1024)
print data
break
print 'Close the socket!'
sock.close()
if __name__ == '__main__':
main()
| 3.25
| 3
|
partname_resolver/components/part.py
|
sakoPO/partname-resolver
| 0
|
12782319
|
from enum import Enum
class Type(Enum):
MLCC = "Multi layer ceramic capacitor"
ElectrolyticAluminium = "Aluminium Electrolytic Capacitor"
ThinFilmResistor = "Thin Film Resistor"
ThickFilmResistor = "Thick Film Resistor"
ThinFilmResistorArray = "Thin Film Resistor Array"
ThickFilmResistorArray = "Thick Film Resistor Array"
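# Example usage (illustrative): Type.MLCC.name == "MLCC" and
# Type.MLCC.value == "Multi layer ceramic capacitor".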
| 2.515625
| 3
|
gps_nav/shortest_path_visualizer.py
|
heng2j/delamain
| 2
|
12782320
|
<reponame>heng2j/delamain
"""
SHORTEST PATH VISUALIZER
......
Created by DevGlitch
"""
import glob
import os
import sys
try:
sys.path.append(
glob.glob(
"../carla/dist/carla-*%d.%d-%s.egg"
% (
sys.version_info.major,
sys.version_info.minor,
"win-amd64" if os.name == "nt" else "linux-x86_64",
)
)[0]
)
except IndexError:
pass
import carla
import argparse
import pandas as pd
from transform_geo_to_carla_xyz import from_gps_to_xyz
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
"--host",
metavar="H",
default="127.0.0.1",
help="IP of the host server (default: 127.0.0.1)",
)
argparser.add_argument(
"-p",
"--port",
metavar="P",
default=2000,
type=int,
help="TCP port to listen to (default: 2000)",
)
args = argparser.parse_args()
try:
client = carla.Client(args.host, args.port)
client.set_timeout(2.0)
world = client.get_world()
# carla_map = world.get_map()
# Path - CSV file only for development - It will use the DF directly
path = pd.read_csv("test_path_Town02.csv")
# Dropping the first column as pd.to_csv created a column for the index
path.drop(path.columns[0], axis=1, inplace=True)
# For debug printing the dataframe
# print(path, "\n\n\n")
for index, row in path.iterrows():
# id = row["id"]
lat = row["lat"]
lon = row["lon"]
alt = row["alt"]
# For debug printing each row
# print("index:", index, "\nid=", id, "\nlat=", lat, "\nlon=", lon, "\nalt=", alt, "\n")
# Converting geolocation coordinates to carla x y z coordinates (in meters)
a, b, c = from_gps_to_xyz(lat, lon, alt)
# print("\na=", a, "\nb=", b, "\nc=", c)
# For debug
# print("id=", id, "\nx=", x, "\ny=", y, "\nz=", z, "\n")
# Need to draw on the carla environment every single waypoint of the path
# Maybe green for start, red for end, and orange in between?
# To visualize each waypoint on the CARLA map
            # Starting waypoint (drawn in orange with the colour values below)
if index == 0:
world.debug.draw_string(
carla.Location(a, b, c + 1),
"START",
draw_shadow=False,
color=carla.Color(r=255, g=64, b=0),
life_time=10.0,
persistent_lines=False,
)
continue
# Ending waypoint (red)
if index == path.last_valid_index():
world.debug.draw_string(
carla.Location(a, b, c + 1),
"END",
draw_shadow=False,
color=carla.Color(r=255, g=0, b=0),
life_time=10.0,
persistent_lines=False,
)
# Waypoints between start and finish (blue)
else:
world.debug.draw_string(
carla.Location(a, b, c + 1),
"X",
draw_shadow=False,
color=carla.Color(r=0, g=0, b=255),
life_time=10.0,
persistent_lines=False,
)
finally:
pass
if __name__ == "__main__":
try:
main()
finally:
print("Done.")
| 2.78125
| 3
|
salt/modules/mod_random.py
|
tomdoherty/salt
| 9,425
|
12782321
|
<filename>salt/modules/mod_random.py
"""
Provides access to randomness generators.
=========================================
.. versionadded:: 2014.7.0
"""
import base64
import hashlib
import random
import salt.utils.pycrypto
from salt.exceptions import SaltInvocationError
ALGORITHMS_ATTR_NAME = "algorithms_guaranteed"
# Define the module's virtual name
__virtualname__ = "random"
def __virtual__():
return __virtualname__
def hash(value, algorithm="sha512"):
"""
.. versionadded:: 2014.7.0
    Hashes a value with the specified algorithm.
value
The value to be hashed.
algorithm : sha512
The algorithm to use. May be any valid algorithm supported by
hashlib.
CLI Example:
.. code-block:: bash
salt '*' random.hash 'I am a string' md5
"""
if isinstance(value, str):
# Under Python 3 we must work with bytes
value = value.encode(__salt_system_encoding__)
if hasattr(hashlib, ALGORITHMS_ATTR_NAME) and algorithm in getattr(
hashlib, ALGORITHMS_ATTR_NAME
):
hasher = hashlib.new(algorithm)
hasher.update(value)
out = hasher.hexdigest()
elif hasattr(hashlib, algorithm):
hasher = hashlib.new(algorithm)
hasher.update(value)
out = hasher.hexdigest()
else:
raise SaltInvocationError("You must specify a valid algorithm.")
return out
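# Illustrative check (not from the original module): hash("I am a string", "md5")
# yields the same digest as hashlib.md5(b"I am a string").hexdigest(), since the
# function simply feeds the encoded value to hashlib.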
def str_encode(value, encoder="base64"):
"""
.. versionadded:: 2014.7.0
value
The value to be encoded.
encoder : base64
The encoder to use on the subsequent string.
CLI Example:
.. code-block:: bash
salt '*' random.str_encode 'I am a new string' base64
"""
if isinstance(value, str):
value = value.encode(__salt_system_encoding__)
if encoder == "base64":
try:
out = base64.b64encode(value)
out = out.decode(__salt_system_encoding__)
except TypeError:
raise SaltInvocationError("Value must be an encode-able string")
else:
try:
out = value.encode(encoder)
except LookupError:
raise SaltInvocationError("You must specify a valid encoder")
except AttributeError:
raise SaltInvocationError("Value must be an encode-able string")
return out
def get_str(
length=20,
chars=None,
lowercase=True,
uppercase=True,
digits=True,
punctuation=True,
whitespace=False,
printable=False,
):
"""
.. versionadded:: 2014.7.0
.. versionchanged:: 3004.0
Changed the default character set used to include symbols and implemented arguments to control the used character set.
Returns a random string of the specified length.
length : 20
Any valid number of bytes.
chars : None
.. versionadded:: 3004.0
String with any character that should be used to generate random string.
This argument supersedes all other character controlling arguments.
lowercase : True
.. versionadded:: 3004.0
Use lowercase letters in generated random string.
(see :py:data:`string.ascii_lowercase`)
This argument is superseded by chars.
uppercase : True
.. versionadded:: 3004.0
Use uppercase letters in generated random string.
(see :py:data:`string.ascii_uppercase`)
This argument is superseded by chars.
digits : True
.. versionadded:: 3004.0
Use digits in generated random string.
(see :py:data:`string.digits`)
This argument is superseded by chars.
printable : False
.. versionadded:: 3004.0
Use printable characters in generated random string and includes lowercase, uppercase,
digits, punctuation and whitespace.
(see :py:data:`string.printable`)
It is disabled by default as includes whitespace characters which some systems do not
handle well in passwords.
This argument also supersedes all other classes because it includes them.
This argument is superseded by chars.
punctuation : True
.. versionadded:: 3004.0
Use punctuation characters in generated random string.
(see :py:data:`string.punctuation`)
This argument is superseded by chars.
whitespace : False
.. versionadded:: 3004.0
Use whitespace characters in generated random string.
(see :py:data:`string.whitespace`)
It is disabled by default as some systems do not handle whitespace characters in passwords
well.
This argument is superseded by chars.
CLI Example:
.. code-block:: bash
salt '*' random.get_str 128
salt '*' random.get_str 128 chars='abc123.!()'
salt '*' random.get_str 128 lowercase=False whitespace=True
"""
return salt.utils.pycrypto.secure_password(
length=length,
chars=chars,
lowercase=lowercase,
uppercase=uppercase,
digits=digits,
punctuation=punctuation,
whitespace=whitespace,
printable=printable,
)
def shadow_hash(crypt_salt=None, password=None, algorithm="sha512"):
"""
Generates a salted hash suitable for /etc/shadow.
crypt_salt : None
Salt to be used in the generation of the hash. If one is not
provided, a random salt will be generated.
password : None
Value to be salted and hashed. If one is not provided, a random
password will be generated.
algorithm : sha512
Hash algorithm to use.
CLI Example:
.. code-block:: bash
salt '*' random.shadow_hash 'My5alT' 'MyP@asswd' md5
"""
return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm)
def rand_int(start=1, end=10, seed=None):
"""
Returns a random integer number between the start and end number.
.. versionadded:: 2015.5.3
start : 1
Any valid integer number
end : 10
Any valid integer number
seed :
Optional hashable object
.. versionchanged:: 2019.2.0
Added seed argument. Will return the same result when run with the same seed.
CLI Example:
.. code-block:: bash
salt '*' random.rand_int 1 10
"""
if seed is not None:
random.seed(seed)
return random.randint(start, end)
def seed(range=10, hash=None):
"""
Returns a random number within a range. Optional hash argument can
be any hashable object. If hash is omitted or None, the id of the minion is used.
.. versionadded:: 2015.8.0
hash: None
Any hashable object.
range: 10
Any valid integer number
CLI Example:
.. code-block:: bash
salt '*' random.seed 10 hash=None
"""
if hash is None:
hash = __grains__["id"]
random.seed(hash)
return random.randrange(range)
| 3.25
| 3
|
run.py
|
ServiceInnovationLab/strawberry
| 0
|
12782322
|
#!/usr/bin/env python
# Read secrets from .env file
import requests
import csv
import json
import os
from dotenv import load_dotenv
load_dotenv()
BOARD_ID = os.getenv("TRELLO_BOARD_ID")
TRELLO_API_KEY = os.getenv('TRELLO_API_KEY')
TRELLO_TOKEN = os.getenv('TRELLO_TOKEN')
output_filename = f'output-{BOARD_ID}.csv'
keep_fetching = True
BASE_URL = "https://api.trello.com/1/boards/{board_id}/actions/?key={api_key}&token={token}&limit=1000".format(
board_id=BOARD_ID,
api_key=TRELLO_API_KEY,
    token=TRELLO_TOKEN)
url = BASE_URL
with open(output_filename, mode='w') as csv_file:
# , quoting=csv.QUOTE_MINIMAL)
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"')
# headers
csv_writer.writerow(['timestamp', 'type', 'card_id', 'card_name', 'card_shortLink',
'listAfter_id', 'listAfter_name', 'listBefore_id', 'listBefore_name', 'text',
'member_fullName', 'member_username'])
    while keep_fetching:
print(url)
print("fetching...")
response = requests.get(url)
print("done.")
# json_data = json.load(json_file)
# for action in json_data.get('actions'):
for action in response.json():
row = []
data = action.get('data')
card = data.get('card', {})
# type
row.append(action.get('date'))
row.append(action.get('type'))
# data.card.id
# data.card.name
# data.card.shortLink
row.append(card.get('id', ''))
row.append(card.get('name', ''))
row.append(card.get('shortLink', ''))
listAfter = data.get('listAfter', {})
# data.listAfter.id
# data.listAfter.name
row.append(listAfter.get('id', ''))
row.append(listAfter.get('name', ''))
listBefore = data.get('listBefore', {})
# data.listBefore.id
# data.listBefore.name
row.append(listBefore.get('id', ''))
row.append(listBefore.get('name', ''))
# data.text
row.append(data.get('text', ''))
memberCreator = action.get('memberCreator', {})
# memberCreator.fullName
# memberCreator.username
row.append(memberCreator.get('fullName', ''))
row.append(memberCreator.get('username', ''))
# Write to the CSV file
csv_writer.writerow(row)
# if we got data, then keep going
keep_fetching = len(response.json()) > 0
if (keep_fetching):
# last_action
oldest_action = response.json()[-1]
print(oldest_action.get('date'))
url = "{base_url}&before={oldest_id}".format(
base_url=BASE_URL, oldest_id=oldest_action.get('id'))
else:
print("No records")
print("----------------------")
| 2.703125
| 3
|
estudo/processamento_de_videos.py
|
PedroMoreira87/machine-learning
| 0
|
12782323
|
<reponame>PedroMoreira87/machine-learning
# EXTRA TASK
# 1. Take the video "Odalisca E45.mpg" and turn it into a sequence of images
# ADDITIONAL INSTRUCTIONS:
# 1. You can work with the images obtained from the video or with the 4 images contained in the folder.
# Additionally, you can run the same experiments with any other set of images you wish.
# 2. Below are some code snippets that may help.
# Importing all necessary libraries
import cv2
import os
# Read the video from specified path
cam = cv2.VideoCapture('videos/south_park.mp4')
try:
# creating a folder
if not os.path.exists('videos/frames'):
os.makedirs('videos/frames')
# if not created then raise error
except OSError:
print('Error: Creating directory of data')
# frame
currentframe = 0
while True:
# reading from frame
ret, frame = cam.read()
if ret:
# if video is still left continue creating images
name = './videos/frames/' + str(currentframe) + '.jpg'
print('Creating...' + name)
# writing the extracted images
cv2.imwrite(name, frame)
# increasing counter so that it will
# show how many frames are created
currentframe += 1
else:
break
# Release all space and windows once done
cam.release()
cv2.destroyAllWindows()
| 3.328125
| 3
|
torchx/cli/cmd_run.py
|
grievejia/torchx
| 0
|
12782324
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import sys
import threading
from dataclasses import asdict
from pprint import pformat
from typing import Dict, List, Optional, Type
import torchx.specs as specs
from pyre_extensions import none_throws
from torchx.cli.cmd_base import SubCommand
from torchx.cli.cmd_log import get_logs
from torchx.runner import Runner, config, get_runner
from torchx.schedulers import get_default_scheduler_name, get_scheduler_factories
from torchx.specs import CfgVal
from torchx.specs.finder import (
ComponentNotFoundException,
ComponentValidationException,
_Component,
get_builtin_source,
get_components,
)
from torchx.util.types import to_dict
logger: logging.Logger = logging.getLogger(__name__)
def _convert_to_option_type(
value: str, option_type: Type[specs.CfgVal]
) -> specs.CfgVal:
if option_type == bool:
return value.lower() == "true"
elif option_type == List[str]:
return value.split(";")
else:
# pyre-ignore[19]
return option_type(value)
def _parse_run_config(arg: str, scheduler_opts: specs.runopts) -> Dict[str, CfgVal]:
conf: Dict[str, CfgVal] = {}
if not arg:
return conf
for key, value in to_dict(arg).items():
option = scheduler_opts.get(key)
if option is None:
raise ValueError(f"Unknown {key}, run `torchx runopts` for more info")
option_type = option.opt_type
typed_value = _convert_to_option_type(value, option_type)
conf[key] = typed_value
return conf
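# Illustrative sketch (not from the repository): assuming a scheduler that declares
# string options "cluster" and "user",
#   _parse_run_config("cluster=foo,user=bar", scheduler_opts)
# would return {"cluster": "foo", "user": "bar"}; typed options are converted via
# _convert_to_option_type (e.g. "true" -> True for bool options, ";"-separated values for List[str]).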
class CmdBuiltins(SubCommand):
def add_arguments(self, subparser: argparse.ArgumentParser) -> None:
subparser.add_argument(
"--print",
type=str,
help="prints the builtin's component def to stdout",
)
def _builtins(self) -> Dict[str, _Component]:
return get_components()
def run(self, args: argparse.Namespace) -> None:
builtin_name = args.print
if not builtin_name:
builtin_components = self._builtins()
num_builtins = len(builtin_components)
print(f"Found {num_builtins} builtin components:")
for i, component in enumerate(builtin_components.values()):
print(f" {i + 1:2d}. {component.name}")
else:
print(get_builtin_source(builtin_name))
class CmdRun(SubCommand):
def __init__(self) -> None:
self._subparser: Optional[argparse.ArgumentParser] = None
def add_arguments(self, subparser: argparse.ArgumentParser) -> None:
scheduler_names = get_scheduler_factories().keys()
self._subparser = subparser
subparser.add_argument(
"-s",
"--scheduler",
type=str,
help=f"Name of the scheduler to use. One of: [{','.join(scheduler_names)}]",
default=get_default_scheduler_name(),
)
subparser.add_argument(
"-cfg",
"--scheduler_args",
type=str,
help="Arguments to pass to the scheduler (Ex:`cluster=foo,user=bar`)."
" For a list of scheduler run options run: `torchx runopts`"
"",
)
subparser.add_argument(
"--dryrun",
action="store_true",
default=False,
help="Does not actually submit the app,"
" just prints the scheduler request",
)
subparser.add_argument(
"--wait",
action="store_true",
default=False,
help="Wait for the app to finish before exiting.",
)
subparser.add_argument(
"--log",
action="store_true",
default=False,
help="Stream logs while waiting for app to finish.",
)
subparser.add_argument(
"conf_args",
nargs=argparse.REMAINDER,
)
def _run(self, runner: Runner, args: argparse.Namespace) -> None:
if args.scheduler == "local":
logger.warning(
"`local` scheduler is deprecated and will be"
" removed in the near future,"
" please use other variants of the local scheduler"
" (e.g. `local_cwd`)"
)
run_opts = get_runner().run_opts()
scheduler_opts = run_opts[args.scheduler]
cfg = _parse_run_config(args.scheduler_args, scheduler_opts)
config.apply(scheduler=args.scheduler, cfg=cfg)
if len(args.conf_args) < 1:
none_throws(self._subparser).error(
"the following arguments are required: conf_file, conf_args"
)
# Python argparse would remove `--` if it was the first argument. This
# does not work well for torchx, since torchx.specs.api uses another argparser to
# parse component arguments.
conf_file, conf_args = args.conf_args[0], args.conf_args[1:]
try:
if args.dryrun:
dryrun_info = runner.dryrun_component(
conf_file, conf_args, args.scheduler, cfg
)
logger.info(
"\n=== APPLICATION ===\n"
f"{pformat(asdict(dryrun_info._app), indent=2, width=80)}"
)
logger.info("\n=== SCHEDULER REQUEST ===\n" f"{dryrun_info}")
else:
app_handle = runner.run_component(
conf_file,
conf_args,
args.scheduler,
cfg,
)
# DO NOT delete this line. It is used by slurm tests to retrieve the app id
print(app_handle)
if args.scheduler.startswith("local"):
self._wait_and_exit(runner, app_handle, log=True)
else:
logger.info(f"Launched app: {app_handle}")
status = runner.status(app_handle)
logger.info(status)
logger.info(f"Job URL: {none_throws(status).ui_url}")
if args.wait:
self._wait_and_exit(runner, app_handle, log=args.log)
except (ComponentValidationException, ComponentNotFoundException) as e:
error_msg = f"\nFailed to run component `{conf_file}` got errors: \n {e}"
logger.error(error_msg)
sys.exit(1)
except specs.InvalidRunConfigException as e:
error_msg = (
f"Scheduler arg is incorrect or missing required option: `{e.cfg_key}`\n"
f"Run `torchx runopts` to check configuration for `{args.scheduler}` scheduler\n"
f"Use `-cfg` to specify run cfg as `key1=value1,key2=value2` pair\n"
"of setup `.torchxconfig` file, see: https://pytorch.org/torchx/main/experimental/runner.config.html"
)
logger.error(error_msg)
sys.exit(1)
def run(self, args: argparse.Namespace) -> None:
os.environ["TORCHX_CONTEXT_NAME"] = os.getenv("TORCHX_CONTEXT_NAME", "cli_run")
with get_runner() as runner:
self._run(runner, args)
def _wait_and_exit(self, runner: Runner, app_handle: str, log: bool) -> None:
logger.info("Waiting for the app to finish...")
log_thread = self._start_log_thread(runner, app_handle) if log else None
status = runner.wait(app_handle, wait_interval=1)
if not status:
raise RuntimeError(f"unknown status, wait returned {status}")
logger.info(f"Job finished: {status.state}")
if log_thread:
log_thread.join()
if status.state != specs.AppState.SUCCEEDED:
logger.error(status)
sys.exit(1)
else:
logger.debug(status)
def _start_log_thread(self, runner: Runner, app_handle: str) -> threading.Thread:
thread = threading.Thread(
target=get_logs,
kwargs={
"file": sys.stderr,
"runner": runner,
"identifier": app_handle,
"regex": None,
"should_tail": True,
},
)
thread.daemon = True
thread.start()
return thread
| 1.882813
| 2
|
funds_brazil.py
|
brunoalvoliv/curva-de-juros
| 0
|
12782325
|
# Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import investpy as py
plt.style.use('fivethirtyeight')
# Fetching the data
bonds = py.get_bonds_overview(country='brazil')
#print(bonds)
print('')
# Filtering by name and closing price
bonds2 = py.get_bonds_overview(country='brazil')[['name', 'last_close']]
#print(bonds2)
# Visualization:
plt.figure(figsize=(12, 6));
plt.title('Curva de Juros - Brazilians bonds');
plt.errorbar(bonds2.index, bonds2.last_close, marker='o', label='Curva de juros', color='blue', linewidth=1);
#plt.xlabel('Nome');
plt.ylabel('Valores de fechamento');
plt.xticks(bonds2.index, bonds2.name);
plt.legend()
plt.show();
'''# Another approach:
pesq_fundos = py.funds.search_funds(by='name', value='Cdi')
print(pesq_fundos.head(10))
# Choosing the fund
fundo = pesq_fundos['name'][1]
print(fundo)
# Fetching the data
data = py.get_fund_historical_data(fund=fundo, country='brazil', from_date='01/01/2020', to_date='30/11/2021')['Close']
print(data.head())
retorno = data.pct_change().iloc[1:]
retorno_acum = (1 + retorno).cumprod()
# Visualization
plt.figure(figsize=(12, 6));
plt.title('Curva de Juros - Brazilians bonds');
plt.errorbar(retorno_acum.index, retorno_acum, label='Curva de juros', color='blue', linewidth=1)
plt.show()'''
| 2.921875
| 3
|
grades/migrations/0018_remove_max_validation_final_grade.py
|
Wassaf-Shahzad/micromasters
| 32
|
12782326
|
<reponame>Wassaf-Shahzad/micromasters
# Generated by Django 2.1.5 on 2019-03-07 06:35
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grades', '0017_micromastersprogramcommendation'),
]
operations = [
migrations.AlterField(
model_name='finalgrade',
name='grade',
field=models.FloatField(null=True, validators=[django.core.validators.MinValueValidator(0.0)]),
),
]
| 1.703125
| 2
|
SimpleCV/MachineLearning/TestTemporalColorTracker.py
|
tpltnt/SimpleCV
| 8
|
12782327
|
<reponame>tpltnt/SimpleCV
from SimpleCV import Camera, Image, Color, TemporalColorTracker, ROI, Display
import matplotlib.pyplot as plt
cam = Camera(1)
tct = TemporalColorTracker()
img = cam.getImage()
roi = ROI(img.width*0.45,img.height*0.45,img.width*0.1,img.height*0.1,img)
tct.train(cam,roi=roi,maxFrames=250,pkWndw=20)
# Matplot Lib example plotting
plotc = {'r':'r','g':'g','b':'b','i':'m','h':'y'}
for key in tct.data.keys():
plt.plot(tct.data[key],plotc[key])
for pt in tct.peaks[key]:
plt.plot(pt[0],pt[1],'r*')
for pt in tct.valleys[key]:
plt.plot(pt[0],pt[1],'b*')
plt.grid()
plt.show()
disp = Display((800,600))
while disp.isNotDone():
img = cam.getImage()
result = tct.recognize(img)
plt.plot(tct._rtData,'r-')
plt.grid()
plt.savefig('temp.png')
plt.clf()
plotImg = Image('temp.png')
roi = ROI(img.width*0.45,img.height*0.45,img.width*0.1,img.height*0.1,img)
roi.draw(width=3)
img.drawText(str(result),20,20,color=Color.RED,fontsize=32)
img = img.applyLayers()
img = img.blit(plotImg.resize(w=img.width,h=img.height),pos=(0,0),alpha=0.5)
img.save(disp)
| 2.484375
| 2
|
server/TimeSeriesJoiner/stream_join_engine.py
|
iot-salzburg/panta-rhei
| 6
|
12782328
|
#!/usr/bin/env python3
"""This engine enables to customize the stream joining very flexible by importing only few lines of code that
define customized functionality. This framework ensures exactly-once time-series processing that are based on joins
using the local stream buffering algorithm with Apache Kafka.
Import constants and 'ingest_fct()' and 'on_join()' to customize the processing.
A join rate of around 15000 time-series joins per second is reached with a exactly-once semantic for
the consume-join-produce procedures using Apache Kafka.
Don't forget to start the demo producers in in advance in order to produce records into the Kafka topic.
"""
import os
import sys
import socket
import time
import json
from datetime import datetime
import pytz
from confluent_kafka import Producer, Consumer, TopicPartition
try:
from .LocalStreamBuffer.local_stream_buffer import Record, StreamBuffer, record_from_dict
except (ModuleNotFoundError, ImportError):
# noinspection PyUnresolvedReferences
from LocalStreamBuffer.local_stream_buffer import Record, StreamBuffer, record_from_dict
def delivery_report(err, msg):
"""Delivery callback for Kafka Produce. Called once for each message produced to indicate delivery result.
Triggered by poll() or flush(). """
if err is not None:
print('Message delivery failed: {}'.format(err))
else:
if VERBOSE:
# get the sent message using msg.value()
print(f"Message '{msg.key().decode('utf-8')}' \tdelivered to topic '{msg.topic()}' [{msg.partition()}].")
# define customized function for join
def join_fct(record_left, record_right):
try:
# create a record dictionary from both join partners
record_dict = on_join(record_left, record_right)
if record_dict is not None:
# adapt two time fields of the record
record_dict["processingTime"] = time.time()
if USE_ISO_TIMESTAMPS:
record_dict["phenomenonTime"] = to_iso_time(record_dict.get("phenomenonTime"))
record_dict["processingTime"] = to_iso_time(record_dict.get("processingTime"))
# produce a Kafka message, the delivery report callback, the key must be thing + quantity
kafka_producer.produce(f"{TARGET_SYSTEM}.ext", json.dumps(record_dict).encode('utf-8'),
key=f"{record_dict.get('thing')}.{record_dict.get('quantity')}".encode('utf-8'),
callback=delivery_report)
except Exception as ex: # this block catches possible errors in custom code
print(f"WARNING, Exception while joining streams: {ex}")
print(f"left record: {record_left}")
print(f"right record: {record_right}")
raise ex
def commit_transaction(verbose=False, commit_time=time.time()):
# Send the consumer's position to transaction to commit them along with the transaction, committing both
# input and outputs in the same transaction is what provides EOS.
kafka_producer.send_offsets_to_transaction(
kafka_consumer.position(kafka_consumer.assignment()),
kafka_consumer.consumer_group_metadata())
# Commit the transaction
kafka_producer.commit_transaction()
# Begin new transaction
kafka_producer.begin_transaction()
# commit the offsets of the latest records that became obsolete, so that the same records are always consumed and joined.
latest_records = []
if stream_buffer.last_removed_left:
latest_records.append(stream_buffer.last_removed_left.data.get("record"))
if stream_buffer.last_removed_right:
latest_records.append(stream_buffer.last_removed_right.data.get("record"))
# Commit message’s offset + 1
kafka_consumer.commit(offsets=[TopicPartition(topic=rec.get("topic"),
partition=rec.get("partition"),
offset=rec.get("offset") + 1) # commit the next (n+1) offset
for rec in latest_records])
if verbose:
print(f"Committed to latest offsets at {commit_time:.6f}.")
def to_iso_time(timestamp):
"""Receives an arbitrary timestamp in UTC format (most likely in unix timestamp) and returns it as ISO-format.
:param timestamp: arbitrary timestamp
:return: timestamp in ISO 8601 and UTC timezone
"""
if isinstance(timestamp, (int, float)):
return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.UTC).isoformat()
if timestamp is None:
return datetime.utcnow().replace(tzinfo=pytz.UTC).isoformat()
return timestamp
if __name__ == "__main__":
# Import the original, or if used in Docker the overwritten custom functions
try:
from .customization.custom_fct import *
except (ModuleNotFoundError, ImportError):
# noinspection PyUnresolvedReferences
from customization.custom_fct import *
if "--use-env-config" in sys.argv:
print(f"Load environment variables: {os.environ}")
try:
STREAM_NAME = os.environ["STREAM_NAME"]
SOURCE_SYSTEMS = os.environ["SOURCE_SYSTEM"]
TARGET_SYSTEM = os.environ["TARGET_SYSTEM"]
GOST_SERVER = os.environ["GOST_SERVER"]
KAFKA_BOOTSTRAP_SERVERS = os.environ["KAFKA_BOOTSTRAP_SERVERS"]
FILTER_LOGIC = os.environ["FILTER_LOGIC"]
# Execute the customization passed as filter logic to load necessary constants and function.
exec(FILTER_LOGIC)
_ = TIME_DELTA # Check if it worked
except Exception as e:
print("Could not load config.")
raise e
print(f"Starting the stream join with the following configurations: "
f"\n\tKAFKA_BOOTSTRAP_SERVERS: '{KAFKA_BOOTSTRAP_SERVERS}'"
f"\n\tSTREAM_NAME: '{STREAM_NAME}'"
f"\n\tSOURCE_SYSTEMS: '{SOURCE_SYSTEMS}'"
f"\n\tTARGET_SYSTEM: '{TARGET_SYSTEM}'"
f"\n\tTIME_DELTA: '{TIME_DELTA}'"
f"\n\tADDITIONAL_ATTRIBUTES: '{ADDITIONAL_ATTRIBUTES}'")
# Create a kafka producer and consumer instance and subscribe to the topics
kafka_consumer = Consumer({
'bootstrap.servers': KAFKA_BOOTSTRAP_SERVERS,
'group.id': f"TS-joiner_{socket.gethostname()}_1",
'auto.offset.reset': 'earliest',
'enable.auto.commit': False,
'enable.auto.offset.store': False
})
kafka_topics_in = [f"{sys}.int" for sys in SOURCE_SYSTEMS.split(",")]
kafka_consumer.subscribe(kafka_topics_in)
# kafka_consumer.assign([TopicPartition(topic, 0) for topic in kafka_topics_in]) # manually assign to an offset
# Create a Kafka producer
kafka_producer = Producer({'bootstrap.servers': KAFKA_BOOTSTRAP_SERVERS,
"transactional.id": f'ms-stream-app_{SOURCE_SYSTEMS}_{STREAM_NAME}'})
# Initialize producer transaction.
kafka_producer.init_transactions()
# Start producer transaction.
kafka_producer.begin_transaction()
print("Create a StreamBuffer instance.")
stream_buffer = StreamBuffer(instant_emit=True, buffer_results=False,
verbose=VERBOSE, join_function=join_fct)
start_time = last_transaction_time = time.time()
n_none_polls = 0
started = False
try:
print("Start the Stream Processing.")
while True:
# Here, a small timeout can be used, as the commit is done manually and based on TRANSACTION_TIME
msgs = kafka_consumer.consume(num_messages=MAX_BATCH_SIZE, timeout=0.2)
# iterate over each message that was consumed
for msg in msgs:
record_json = json.loads(msg.value().decode('utf-8'))
if VERBOSE:
print(f"Received new record: {record_json}")
# create a Record from the json
additional_attributes = {att: record_json.get(att.strip()) for att in ADDITIONAL_ATTRIBUTES.split(",")
if att != ""}
record = Record(
thing=record_json.get("thing"),
quantity=record_json.get("quantity"),
timestamp=record_json.get("phenomenonTime"),
result=record_json.get("result"),
topic=msg.topic(), partition=msg.partition(), offset=msg.offset(),
**additional_attributes)
ingest_fct(record, stream_buffer)
# commit the transaction every TRANSACTION_TIME
cur_time = time.time()
if cur_time >= last_transaction_time + TRANSACTION_TIME:
last_transaction_time = cur_time
commit_transaction(verbose=VERBOSE, commit_time=last_transaction_time)
except KeyboardInterrupt:
print("Gracefully stopping")
finally:
stop_time = time.time()
# commit processed message offsets to the transaction
kafka_producer.send_offsets_to_transaction(
kafka_consumer.position(kafka_consumer.assignment()),
kafka_consumer.consumer_group_metadata())
# commit transaction
kafka_producer.commit_transaction()
# Leave group and commit offsets
kafka_consumer.close()
print(f"\nRecords in |{TARGET_SYSTEM}| = {stream_buffer.get_join_counter()}, "
f"|left buffer| = {stream_buffer.get_left_counter()}, "
f"|right buffer| = {stream_buffer.get_right_counter()}.")
if start_time != stop_time:
print(f"Joined time-series {stop_time - start_time:.6f} s long, "
f"that are {stream_buffer.get_join_counter() / (stop_time - start_time):.2f} joins per second.")
| 2.390625
| 2
|
lib/whoosh/filedb/multiproc.py
|
ckolumbus/WikidPad.svn
| 2
|
12782329
|
#===============================================================================
# Copyright 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import os
from multiprocessing import Process, Queue
from whoosh.filedb.filetables import LengthWriter, LengthReader
from whoosh.filedb.filewriting import SegmentWriter
from whoosh.filedb.pools import (imerge, PoolBase, read_run, TempfilePool,
write_postings)
from whoosh.filedb.structfile import StructFile
from whoosh.writing import IndexWriter
from whoosh.util import now
# Multiprocessing writer
class SegmentWritingTask(Process):
def __init__(self, storage, indexname, segmentname, kwargs, postingqueue):
Process.__init__(self)
self.storage = storage
self.indexname = indexname
self.segmentname = segmentname
self.kwargs = kwargs
self.postingqueue = postingqueue
self._segment = None
self.running = True
def run(self):
pqueue = self.postingqueue
index = self.storage.open_index(self.indexname)
writer = SegmentWriter(index, name=self.segmentname, lock=False, **self.kwargs)
while self.running:
args = pqueue.get()
if args is None:
break
writer.add_document(**args)
if not self.running:
writer.cancel()
self.terminate()
else:
writer.pool.finish(writer.docnum, writer.lengthfile,
writer.termsindex, writer.postwriter)
self._segment = writer._getsegment()
def get_segment(self):
return self._segment
def cancel(self):
self.running = False
class MultiSegmentWriter(IndexWriter):
def __init__(self, index, procs=2, **writerargs):
self.index = index
self.lock = index.storage.lock(index.indexname + "_LOCK")
self.tasks = []
self.postingqueue = Queue()
#self.resultqueue = Queue()
names = [index._next_segment_name() for _ in xrange(procs)]
self.tasks = [SegmentWritingTask(index.storage, index.indexname,
segname, writerargs, self.postingqueue)
for segname in names]
for task in self.tasks:
task.start()
def add_document(self, **args):
self.postingqueue.put(args)
def cancel(self):
for task in self.tasks:
task.cancel()
self.lock.release()
def commit(self):
procs = len(self.tasks)
for _ in xrange(procs):
self.postingqueue.put(None)
for task in self.tasks:
print "Joining", task
task.join()
self.index.segments.append(task.get_segment())
self.index.commit()
self.lock.release()
# Multiprocessing pool
class PoolWritingTask(Process):
def __init__(self, schema, dir, postingqueue, resultqueue, limitmb):
Process.__init__(self)
self.schema = schema
self.dir = dir
self.postingqueue = postingqueue
self.resultqueue = resultqueue
self.limitmb = limitmb
def run(self):
pqueue = self.postingqueue
rqueue = self.resultqueue
subpool = TempfilePool(self.schema, limitmb=self.limitmb, dir=self.dir)
while True:
code, args = pqueue.get()
if code == -1:
doccount = args
break
if code == 0:
subpool.add_content(*args)
elif code == 1:
subpool.add_posting(*args)
elif code == 2:
subpool.add_field_length(*args)
lenfilename = subpool.unique_name(".lengths")
subpool._write_lengths(StructFile(open(lenfilename, "wb")), doccount)
subpool.dump_run()
rqueue.put((subpool.runs, subpool.fieldlength_totals(),
subpool.fieldlength_maxes(), lenfilename))
class MultiPool(PoolBase):
def __init__(self, schema, dir=None, procs=2, limitmb=32, **kw):
PoolBase.__init__(self, schema, dir=dir)
self.procs = procs
self.limitmb = limitmb
self.postingqueue = Queue()
self.resultsqueue = Queue()
self.tasks = [PoolWritingTask(self.schema, self.dir, self.postingqueue,
self.resultsqueue, self.limitmb)
for _ in xrange(procs)]
for task in self.tasks:
task.start()
def add_content(self, *args):
self.postingqueue.put((0, args))
def add_posting(self, *args):
self.postingqueue.put((1, args))
def add_field_length(self, *args):
self.postingqueue.put((2, args))
def cancel(self):
for task in self.tasks:
task.terminate()
self.cleanup()
def cleanup(self):
pass
def finish(self, doccount, lengthfile, termtable, postingwriter):
_fieldlength_totals = self._fieldlength_totals
if not self.tasks:
return
pqueue = self.postingqueue
rqueue = self.resultsqueue
for _ in xrange(self.procs):
pqueue.put((-1, doccount))
#print "Joining..."
t = now()
for task in self.tasks:
task.join()
#print "Join:", now() - t
#print "Getting results..."
t = now()
runs = []
lenfilenames = []
for task in self.tasks:
taskruns, flentotals, flenmaxes, lenfilename = rqueue.get()
runs.extend(taskruns)
lenfilenames.append(lenfilename)
for fieldnum, total in flentotals.iteritems():
_fieldlength_totals[fieldnum] += total
for fieldnum, length in flenmaxes.iteritems():
if length > self._fieldlength_maxes.get(fieldnum, 0):
self._fieldlength_maxes[fieldnum] = length
#print "Results:", now() - t
#print "Writing lengths..."
t = now()
lw = LengthWriter(lengthfile, doccount)
for lenfilename in lenfilenames:
sublengths = LengthReader(StructFile(open(lenfilename, "rb")), doccount)
lw.add_all(sublengths)
os.remove(lenfilename)
lw.close()
lengths = lw.reader()
#print "Lengths:", now() - t
t = now()
iterator = imerge([read_run(runname, count) for runname, count in runs])
total = sum(count for runname, count in runs)
write_postings(self.schema, termtable, lengths, postingwriter, iterator)
for runname, count in runs:
os.remove(runname)
#print "Merge:", now() - t
self.cleanup()
| 2.28125
| 2
|
aerie/utils.py
|
alex-oleshkevich/aerie
| 6
|
12782330
|
<filename>aerie/utils.py<gh_stars>1-10
import typing as t
from contextlib import contextmanager
from sqlalchemy.exc import MultipleResultsFound, NoResultFound
from aerie.exceptions import NoResultsError, TooManyResultsError
@contextmanager
def convert_exceptions() -> t.Generator[None, None, None]:
try:
yield
except MultipleResultsFound as exc:
raise TooManyResultsError() from exc
except NoResultFound:
raise NoResultsError('No rows found when one was required.')
ITEM = t.TypeVar('ITEM')
def chunked(items: t.Iterable[ITEM], size: int) -> t.Generator[t.List[ITEM], None, None]:
result = []
for value in items:
result.append(value)
if len(result) == size:
yield result
result = []
if len(result):
yield result
def colorize(sql: str) -> str:
try:
import pygments
import pygments.formatters
import pygments.lexers
lexer = pygments.lexers.get_lexer_by_name("sql")
formatter = pygments.formatters.get_formatter_by_name("console")
sql = pygments.highlight(sql, lexer, formatter)
except ImportError:
pass
return sql
| 2.3125
| 2
|
setup.py
|
AstroMatt/book-apollo-moon-experiments-alsep
| 0
|
12782331
|
<filename>setup.py
#!/usr/bin/env python3
from datetime import datetime, timezone
from os import makedirs
from os.path import dirname, abspath, join, basename
from shlex import split
from shutil import rmtree
from subprocess import run
FORMAT = 'singlehtml'
SECOND = 1
MINUTE = 60 * SECOND
START_TIME = datetime.now()
sourcedir = dirname(abspath(__file__))
project_name = basename(sourcedir)
outputdir = join('/tmp/', project_name)
rmtree(outputdir, ignore_errors=True)
makedirs(outputdir, exist_ok=True)
run('clear')
cmd = split(f'sphinx-build -a -E -j auto --color -b {FORMAT} {sourcedir} {outputdir}')
run(cmd)
last = run('git log -1 --format="%ad" --date=iso', shell=True, capture_output=True).stdout.strip().decode()
last = datetime.strptime(last, '%Y-%m-%d %H:%M:%S %z')
delta = datetime.now(tz=timezone.utc) - last
since = round(delta.total_seconds() / MINUTE)
duration = datetime.now() - START_TIME
duration_seconds = round(duration.total_seconds())
duration_minutes = round(duration_seconds / MINUTE, 1)
print(f'\n\n')
print(f'Build took: {duration_seconds} seconds ({duration_minutes} minutes)')
print(f'Last commit: {last}')
print(f'Since: {since}m')
| 2.34375
| 2
|
web/djangoappengine/db/utils.py
|
bdelliott/wordgame
| 2
|
12782332
|
<filename>web/djangoappengine/db/utils.py<gh_stars>1-10
from google.appengine.datastore.datastore_query import Cursor
class CursorQueryMixin(object):
def clone(self, *args, **kwargs):
kwargs['_gae_cursor'] = getattr(self, '_gae_cursor', None)
kwargs['_gae_start_cursor'] = getattr(self, '_gae_start_cursor', None)
kwargs['_gae_end_cursor'] = getattr(self, '_gae_end_cursor', None)
return super(CursorQueryMixin, self).clone(*args, **kwargs)
def get_cursor(queryset):
# Evaluate QuerySet
len(queryset)
cursor = getattr(queryset.query, '_gae_cursor', None)
return Cursor.to_websafe_string(cursor)
def set_cursor(queryset, start=None, end=None):
queryset = queryset.all()
class CursorQuery(CursorQueryMixin, queryset.query.__class__):
pass
queryset.query = queryset.query.clone(klass=CursorQuery)
if start is not None:
start = Cursor.from_websafe_string(start)
queryset.query._gae_start_cursor = start
if end is not None:
end = Cursor.from_websafe_string(end)
queryset.query._gae_end_cursor = end
return queryset
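# Usage sketch (assumption, MyModel is hypothetical): paginate a queryset with
# datastore cursors, e.g.
#   qs = MyModel.objects.all()[:20]
#   list(qs)                                     # evaluate the first page
#   cursor = get_cursor(qs)
#   next_page = set_cursor(MyModel.objects.all(), start=cursor)[:20]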
| 2.15625
| 2
|
brainrender/Utils/paths_manager.py
|
FedeClaudi/brainrender
| 0
|
12782333
|
<filename>brainrender/Utils/paths_manager.py<gh_stars>0
import sys
import os
from brainrender.Utils.data_io import save_json
"""
Class to create and store paths to a number of folders used to save/load data
"""
# Default paths for Data Folders (store stuff like object meshes, neurons morphology data etc)
default_paths = dict(
# BRAIN REGIONS MESHES
mouse_meshes= "Data/Meshes/Mouse", # allen brain atlas .obj meshes file, downloaded through allen API
rat_meshes= "Data/Meshes/Rat", # meshes with rat brain data, to be downloaded
drosophila_meshes= "Data/Meshes/Drosophila", # meshes with drosophila brain data, to be downloaded
other_meshes= "Data/Meshes/Other", # any other mesh the user might want to store
metadata= "Data/Metadata",
# OUTPUT Folders
output_screenshots= "Output/Screenshots",
output_videos= "Output/Videos",
output_scenes= "Output/Scenes",
output_data= "Output/Data",
# User folder
user= "User",
# ----------------------- Folder for allen brain atlas ----------------------- #
# NEURONS MORPHOLOGY
morphology_allen= "Data/Morphology/Allen", # .swc files with neurons morphology downloaded through allen API
morphology_cache= "Data/Morphology/cache",
morphology_mouselight= "Data/Morphology/MouseLight", # .swc and .json files from mouse light dataset
# Allen caches
mouse_connectivity_cache= "Data/ABA/MCC",
mouse_celltype_cache= "Data/ABA/MCTC",
annotated_volume_fld = "Data/ABA",
mouse_connectivity_volumetric="Data/ABA/Volumetric",
mouse_connectivity_volumetric_cache="Data/ABA/Volumetric/cache",
# Streamlines cache
streamlines_cache= "Data/Streamlines",
# ------------------- Folders for the insect brain db atlas ------------------ #
ibdb_meshes_folder = "Data/InsectsDBs",
# -------------------------- Folders for zfish atlas ------------------------- #
zfish_meshes_folder = "Data/Zfish",
)
class Paths:
_folders = ["mouse_meshes",
"other_meshes",
"morphology_allen",
"morphology_cache",
"morphology_mouselight",
"mouse_connectivity_cache",
"mouse_celltype_cache",
"streamlines_cache",
"output_screenshots",
"output_videos",
"output_scenes",
"output_data",
"user",
"metadata",
'annotated_volume_fld',
'mouse_connectivity_volumetric',
'mouse_connectivity_volumetric_cache',
'ibdb_meshes_folder',
'zfish_meshes_folder']
def __init__(self, base_dir=None, **kwargs):
"""
Parses a YAML file to get data folder paths. Stores paths to a number of folders used throughout brainrender.
Other classes (e.g. brainrender.Scene) subclass Paths.
:param base_dir: str with path to directory to use to save data. If None, the user's base directory is used.
:param kwargs: use the name of a folder as key and a path as argument to specify the path of individual subfolders
"""
# Get and make base directory
if base_dir is None:
user_dir = os.path.expanduser("~")
if not os.path.isdir(user_dir):
raise FileExistsError("Could not find user base folder (to save brainrender data). Platform: {}".format(sys.platform))
self.base_dir = os.path.join(user_dir, ".brainrender")
else:
self.base_dir = base_dir
if not os.path.isdir(self.base_dir):
os.mkdir(self.base_dir)
for fld_name in self._folders:
# Check if user provided a path for this folder, otherwise use default
fld_path = kwargs.pop(fld_name, default_paths[fld_name])
# Make complete path and save it as an attribute of this class
path = os.path.join(self.base_dir, fld_path)
# Create folder if it doesn't exist
if not os.path.isdir(path):
print("Creating folder at: {}".format(path))
os.makedirs(path)
self.__setattr__(fld_name, path)
# Make a file for morphology cache metadata
self.morphology_cache_metadata = os.path.join(self.morphology_cache, 'metadata.json')
if not os.path.isfile(self.morphology_cache_metadata):
save_json(self.morphology_cache_metadata, {})
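# Usage sketch (assumption): users and subclasses access the created folders as attributes, e.g.
#   paths = Paths()                      # creates ~/.brainrender/... on first use
#   print(paths.output_screenshots)      # .../Output/Screenshots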
| 2.625
| 3
|
timetracker/sheets/tasks.py
|
tm-kn/CHT2520-assignment2
| 0
|
12782334
|
<reponame>tm-kn/CHT2520-assignment2
from timetracker.celery import app
from timetracker.sheets.models import TimeSheet
@app.task
def generate_csv_file_for_timesheet(sheet_id, end_datetime):
sheet = TimeSheet.objects.get(pk=sheet_id)
sheet.generate_csv_file()
| 2.234375
| 2
|
ml4vision/ml/__init__.py
|
ml4vision/ml4vision-py
| 0
|
12782335
|
try:
import torch
except ImportError:
raise ImportError(
"ml4vision.ml requires the pytorch library. Please run: pip install ml4vision-py[ml]"
) from None
| 1.617188
| 2
|
ConvLSTMCEll.py
|
Mo0nl19ht/convlstm-seq2seq-attention
| 1
|
12782336
|
<gh_stars>1-10
import tensorflow as tf
from tensorflow import keras
import tensorflow_addons as tfa
from Self_Attention_Memory_Module import Self_Attention_Memory_Module
class ConvLSTMCell(tf.keras.Model):
def __init__(self, hidden_dim,att_hidden_dim, kernel_size, bias):
super(ConvLSTMCell, self).__init__()
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.bias = bias
self.attention_layer = Self_Attention_Memory_Module(att_hidden_dim,kernel_size)
self.conv = tf.keras.layers.Conv2D(
filters = 4 * self.hidden_dim,
kernel_size = self.kernel_size,
padding = 'same',
use_bias = self.bias,
)
self.group_norm =tfa.layers.GroupNormalization(groups=4 * self.hidden_dim, axis=-1)
def call(self, input_tensor, cur_state):
h_cur, c_cur, m_cur = cur_state
transposed_input = tf.transpose(input_tensor,perm=[0,3,1,2])
# print(transposed_input.shape)
# print(input_tensor.shape,h_cur.shape)
combined = tf.concat([input_tensor, h_cur], axis=-1)
# print(combined.shape)
combined_conv = self.conv(combined)
normalized_conv = self.group_norm(combined_conv)
# print(normalized_conv.shape)
# num_or_size_splits could also be set to self.hidden_dim; originally axis=-1 and num_or_size_splits=4 were used
cc_i, cc_f, cc_o, cc_g = tf.split(normalized_conv, num_or_size_splits=4, axis=-1)
i = tf.keras.activations.sigmoid(cc_i)
f = tf.keras.activations.sigmoid(cc_f)
o = tf.keras.activations.sigmoid(cc_o)
g = tf.keras.activations.tanh(cc_g)
c_next = f*c_cur+i*g
h_next = o*tf.keras.activations.tanh(c_next)
# attention
h_next, m_next = self.attention_layer(h_next,m_cur)
return (h_next,c_next,m_next)
def init_hidden(self, batch_size, image_size):
height, width = image_size
return (tf.zeros([batch_size, height, width,self.hidden_dim]),
tf.zeros([batch_size, height, width,self.hidden_dim]),
tf.zeros([batch_size, height, width,self.hidden_dim])
)
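# Usage sketch (assumption, shapes are illustrative): states are channels-last tensors, e.g.
#   cell = ConvLSTMCell(hidden_dim=64, att_hidden_dim=16, kernel_size=3, bias=True)
#   h, c, m = cell.init_hidden(batch_size=8, image_size=(32, 32))
#   h, c, m = cell(x, (h, c, m))   # x: (8, 32, 32, in_channels)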
| 2.515625
| 3
|
lib/binlog_stream_reader_wrapper.py
|
jschell12/mysql_binlog_kinesis_producer
| 0
|
12782337
|
import datetime
from enum import Enum
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
DeleteRowsEvent,
UpdateRowsEvent,
WriteRowsEvent,
TableMapEvent
)
from pymysqlreplication.event import (
BeginLoadQueryEvent,
ExecuteLoadQueryEvent,
QueryEvent,
RotateEvent,
HeartbeatLogEvent
)
from lib.utils import Utils
class EventType(Enum):
LOG_STATE = 1
INSERT = 2
UPDATE = 3
DELETE = 4
TABLE = 5
OTHER = 6
class BinLogStreamReaderWrapper(object):
''' Wrapper class for the python-mysql-replication library '''
def __init__(self, mysql_settings,server_id=1,blocking=False, resume_stream=True, log_file=None, log_pos=None, slave_heartbeat=None):
self.__stream = BinLogStreamReader(
connection_settings = mysql_settings,
server_id = server_id,
blocking = blocking,
resume_stream = resume_stream,
only_events = [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, TableMapEvent, BeginLoadQueryEvent, ExecuteLoadQueryEvent, QueryEvent], # RotateEvent, QueryEvent, HeartbeatLogEvent
log_file=log_file,
log_pos=log_pos,
slave_heartbeat=slave_heartbeat
)
def close(self):
self.__stream.close()
def fetch_event(self):
return self.__parse_event(self.__stream.fetchone())
def __iter__ (self):
return iter(self.fetch_event, None)
def __parse_event(self, binlogevent):
event = {
'event_type': self.__get_event_type(binlogevent),
'pymysqlreplication_event_type': type(binlogevent).__name__,
'timestamp': binlogevent.timestamp,
'log_pos': binlogevent.packet.log_pos,
'log_file': self.__stream.log_file
}
if self.__is_query_event(binlogevent):
event['log_pos'] = binlogevent.packet.log_pos
event['log_file'] = self.__stream.log_file
elif self.__is_rotate_event(binlogevent):
event['log_pos'] = binlogevent.position
event['log_file'] = binlogevent.next_binlog
elif self.__is_row_event(binlogevent) or self.__is_table_event(binlogevent):
if binlogevent.schema != 'auth': # For security
event['schema'] = binlogevent.schema
event['table'] = binlogevent.table
if self.__is_row_event(binlogevent):
for row in binlogevent.rows:
event['primary_key'] = binlogevent.primary_key
event['before_values'] = self.__get_before_values(binlogevent, row)
event['after_values'] = self.__get_after_values(binlogevent, row)
elif self.__is_heartbeat_event(binlogevent):
event['log_file'] = binlogevent.ident
return event
def __get_event_type(self, binlogevent):
event_type = None
if self.__is_heartbeat_event(binlogevent) or self.__is_rotate_event(binlogevent):
event_type = EventType.LOG_STATE
elif self.__is_delete_event(binlogevent):
event_type = EventType.DELETE
elif self.__is_update_event(binlogevent):
event_type = EventType.UPDATE
elif self.__is_insert_event(binlogevent):
event_type = EventType.INSERT
elif self.__is_table_event(binlogevent):
event_type = EventType.TABLE
else:
event_type = EventType.OTHER
return event_type
def __get_before_values(self, binlogevent, row):
before_values = None
if isinstance(binlogevent, UpdateRowsEvent):
before_values = row['before_values']
elif isinstance(binlogevent, DeleteRowsEvent):
before_values = row['values']
return before_values
def __get_after_values(self, binlogevent, row):
after_values = None
if isinstance(binlogevent, WriteRowsEvent):
after_values = row['values']
elif isinstance(binlogevent, UpdateRowsEvent):
after_values = row['after_values']
return after_values
def __is_row_event(self, binlogevent):
return self.__is_insert_event(binlogevent) or self.__is_update_event(binlogevent) or self.__is_delete_event(binlogevent)
def __is_delete_event(self, binlogevent):
return isinstance(binlogevent, DeleteRowsEvent)
def __is_update_event(self, binlogevent):
return isinstance(binlogevent, UpdateRowsEvent)
def __is_insert_event(self, binlogevent):
return isinstance(binlogevent, WriteRowsEvent)
def __is_table_event(self, binlogevent):
return isinstance(binlogevent, (TableMapEvent))
def __is_query_event(self, binlogevent):
return isinstance(binlogevent, (QueryEvent))
def __is_begin_query_event(self, binlogevent):
return isinstance(binlogevent, (BeginLoadQueryEvent))
def __is_load_query_event(self, binlogevent):
return isinstance(binlogevent, (ExecuteLoadQueryEvent))
def __is_rotate_event(self, binlogevent):
return isinstance(binlogevent, (RotateEvent))
def __is_heartbeat_event(self, binlogevent):
return isinstance(binlogevent, (HeartbeatLogEvent))
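# Usage sketch (assumption, connection settings are illustrative only):
#   mysql_settings = {"host": "127.0.0.1", "port": 3306, "user": "repl", "passwd": "secret"}
#   reader = BinLogStreamReaderWrapper(mysql_settings, blocking=True)
#   for event in reader:
#       print(event["event_type"], event.get("table"))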
| 2.203125
| 2
|
medium/148. Sort List.py
|
junyinglucn/leetcode
| 0
|
12782338
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def sortList(self, head: ListNode) -> ListNode:
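# Bottom-up merge sort on the linked list: repeatedly merge already-sorted runs of
# length intv (1, 2, 4, ...), giving O(n log n) time and O(1) extra space.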
if not head or not head.next:
return head
curr, length = head, 0
while curr:
curr, length = curr.next, length + 1
root = ListNode(0)
root.next = head
intv = 1
while intv < length:
merge, curr = root, root.next
while curr:
h1, intv_1 = curr, intv
while curr and intv_1:
curr, intv_1 = curr.next, intv_1 - 1
if intv_1:
break
h2, intv_2 = curr, intv
while curr and intv_2:
curr, intv_2 = curr.next, intv_2 - 1
len1, len2 = intv, intv - intv_2
while len1 and len2:
if h1.val < h2.val:
merge.next, h1, len1 = h1, h1.next, len1 - 1
else:
merge.next, h2, len2 = h2, h2.next, len2 - 1
merge = merge.next
if len1:
merge.next = h1
else:
merge.next = h2
while len1 > 0 or len2 > 0:
merge, len1, len2 = merge.next, len1 - 1, len2 - 1
merge.next = curr
intv *= 2
return root.next
| 3.921875
| 4
|
sangwoo_example.py
|
loganlebanoff/correct_summarization
| 2
|
12782339
|
<gh_stars>1-10
from tqdm import tqdm
import glob
from data import example_generator # The module "data" is from Abigail See's code
import json
import os
dataset_split = 'test'
source_dir = os.path.expanduser('~') + '/data/tf_data/with_coref_and_ssi/cnn_dm'
names_to_types = [('raw_article_sents', 'string_list'), ('similar_source_indices', 'delimited_list_of_lists'), ('summary_text', 'string'), ('corefs', 'json')]
def decode_text(text):
try:
text = text.decode('utf-8')
except:
try:
text = text.decode('latin-1')
except:
raise
return text
def unpack_tf_example(example, names_to_types):
def get_string(name):
return decode_text(example.features.feature[name].bytes_list.value[0])
def get_string_list(name):
texts = get_list(name)
texts = [decode_text(text) for text in texts]
return texts
def get_list(name):
return example.features.feature[name].bytes_list.value
def get_delimited_list(name):
text = get_string(name)
return text.split(' ')
def get_delimited_list_of_lists(name):
text = get_string(name)
return [[int(i) for i in (l.split(' ') if l != '' else [])] for l in text.split(';')]
def get_delimited_list_of_tuples(name):
list_of_lists = get_delimited_list_of_lists(name)
return [tuple(l) for l in list_of_lists]
def get_json(name):
text = get_string(name)
return json.loads(text)
func = {'string': get_string,
'list': get_list,
'string_list': get_string_list,
'delimited_list': get_delimited_list,
'delimited_list_of_lists': get_delimited_list_of_lists,
'delimited_list_of_tuples': get_delimited_list_of_tuples,
'json': get_json}
res = []
for name, type in names_to_types:
if name not in example.features.feature:
raise Exception('%s is not a feature of TF Example' % name)
res.append(func[type](name))
return res
source_files = sorted(glob.glob(source_dir + '/' + dataset_split + '*'))
total = len(source_files) * 1000
example_generator = example_generator(source_dir + '/' + dataset_split + '*', True)
for example in tqdm(example_generator, total=total):
raw_article_sents, similar_source_indices_list, summary_text, corefs = unpack_tf_example(example, names_to_types)
groundtruth_summ_sents = [sent.strip() for sent in summary_text.strip().split('\n')]
for summary_sent_idx, source_sent_indices in enumerate(similar_source_indices_list):
print('SUMMARY SENTENCE:')
print('------------------------------')
print(groundtruth_summ_sents[summary_sent_idx] + '\n')
print('SOURCE SENTENCE(S):')
print('------------------------------')
for sent_idx in source_sent_indices:
print(raw_article_sents[sent_idx] + '\n')
print('')
| 2.453125
| 2
|
tests/test_http.py
|
sylwekb/apistar
| 0
|
12782340
|
<filename>tests/test_http.py
from apistar import App, Route, http
from apistar.test import TestClient
def get_method(method: http.Method) -> http.Response:
return http.Response({'method': method})
def get_scheme(scheme: http.Scheme) -> http.Response:
return http.Response({'scheme': scheme})
def get_host(host: http.Host) -> http.Response:
return http.Response({'host': host})
def get_port(port: http.Port) -> http.Response:
return http.Response({'port': port})
def get_root_path(root_path: http.RootPath) -> http.Response:
return http.Response({'root_path': root_path})
def get_path(path: http.Path) -> http.Response:
return http.Response({'path': path})
def get_query_string(query_string: http.QueryString) -> http.Response:
return http.Response({'query_string': query_string})
def get_query_params(query_params: http.QueryParams) -> http.Response:
return http.Response({'query_params': query_params.to_dict(flat=False)})
def get_page_query_param(page: http.QueryParam) -> http.Response:
return http.Response({'page': page})
def get_url(url: http.URL) -> http.Response:
return http.Response({'url': url})
def get_body(body: http.Body) -> http.Response:
return http.Response({'body': body.decode('utf-8')})
def get_headers(headers: http.Headers) -> http.Response:
return http.Response({'headers': dict(headers)})
def get_accept_header(accept: http.Header) -> http.Response:
return http.Response({'accept': accept})
def get_request(request: http.Request) -> http.Response:
return http.Response({
'method': request.method,
'url': request.url,
'headers': dict(request.headers)
})
app = App(routes=[
Route('/method/', 'get', get_method),
Route('/method/', 'post', get_method),
Route('/scheme/', 'get', get_scheme),
Route('/host/', 'get', get_host),
Route('/port/', 'get', get_port),
Route('/root_path/', 'get', get_root_path),
Route('/path/', 'get', get_path),
Route('/query_string/', 'get', get_query_string),
Route('/query_params/', 'get', get_query_params),
Route('/page_query_param/', 'get', get_page_query_param),
Route('/url/', 'get', get_url),
Route('/body/', 'post', get_body),
Route('/headers/', 'get', get_headers),
Route('/accept_header/', 'get', get_accept_header),
Route('/request/', 'get', get_request),
])
client = TestClient(app)
def test_method():
response = client.get('http://example.com/method/')
assert response.json() == {'method': 'GET'}
response = client.post('http://example.com/method/')
assert response.json() == {'method': 'POST'}
def test_scheme():
response = client.get('http://example.com/scheme/')
assert response.json() == {'scheme': 'http'}
response = client.get('https://example.com/scheme/')
assert response.json() == {'scheme': 'https'}
def test_host():
response = client.get('http://example.com/host/')
assert response.json() == {'host': 'example.com'}
def test_port():
response = client.get('http://example.com/port/')
assert response.json() == {'port': 80}
response = client.get('https://example.com/port/')
assert response.json() == {'port': 443}
response = client.get('http://example.com:123/port/')
assert response.json() == {'port': 123}
response = client.get('https://example.com:123/port/')
assert response.json() == {'port': 123}
def test_root_path():
response = client.get('http://example.com/root_path/')
assert response.json() == {'root_path': ''}
def test_path():
response = client.get('http://example.com/path/')
assert response.json() == {'path': '/path/'}
def test_query_string():
response = client.get('http://example.com/query_string/')
assert response.json() == {'query_string': ''}
response = client.get('http://example.com/query_string/?a=1&a=2&b=3')
assert response.json() == {'query_string': 'a=1&a=2&b=3'}
def test_query_params():
response = client.get('http://example.com/query_params/')
assert response.json() == {'query_params': {}}
response = client.get('http://example.com/query_params/?a=1&a=2&b=3')
assert response.json() == {
'query_params': {'a': ['1', '2'], 'b': ['3']}
}
def test_single_query_param():
response = client.get('http://example.com/page_query_param/')
assert response.json() == {'page': None}
response = client.get('http://example.com/page_query_param/?page=123')
assert response.json() == {'page': '123'}
response = client.get('http://example.com/page_query_param/?page=123&page=456')
assert response.json() == {'page': '123'}
def test_url():
response = client.get('http://example.com/url/')
assert response.json() == {'url': 'http://example.com/url/'}
response = client.get('https://example.com/url/')
assert response.json() == {'url': 'https://example.com/url/'}
response = client.get('http://example.com:123/url/')
assert response.json() == {'url': 'http://example.com:123/url/'}
response = client.get('https://example.com:123/url/')
assert response.json() == {'url': 'https://example.com:123/url/'}
response = client.get('http://example.com/url/?a=1')
assert response.json() == {'url': 'http://example.com/url/?a=1'}
def test_body():
response = client.post('http://example.com/body/', data='{"hello": 123}')
assert response.json() == {'body': '{"hello": 123}'}
def test_headers():
response = client.get('http://example.com/headers/')
assert response.json() == {'headers': {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Host': 'example.com',
'User-Agent': 'requests_client'
}}
response = client.get('http://example.com/headers/', headers={
'X-Example-Header': 'example'
})
assert response.json() == {'headers': {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Host': 'example.com',
'User-Agent': 'requests_client',
'X-Example-Header': 'example'
}}
def test_accept_header():
response = client.get('http://example.com/accept_header/')
assert response.json() == {'accept': '*/*'}
def test_request():
response = client.get('http://example.com/request/')
assert response.json() == {
'method': 'GET',
'url': 'http://example.com/request/',
'headers': {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Host': 'example.com',
'User-Agent': 'requests_client'
}
}
| 2.609375
| 3
|
Node.py
|
KubaWernerowski/Crimetrax
| 1
|
12782341
|
<reponame>KubaWernerowski/Crimetrax<filename>Node.py
class Node:
def __init__(self, x, y):
self.long = x
self.lat = y
self.neighbors = 0
def __str__(self):
return str(self.long) + "," + str(self.lat) + "," + str(self.neighbors)
| 2.9375
| 3
|
walden/main.py
|
aravindkoneru/Walden
| 1
|
12782342
|
import argparse
import os
import sys
from collections import namedtuple
from pathlib import Path
import toml
from ._build import build_journal
from ._create import create_journal
from ._data_classes import JournalConfiguration, WaldenConfiguration
from ._delete import delete_journal
from ._edit import edit_journal
from ._errors import WaldenException
from ._list import list_journals
from ._utils import print_error
# for initializing commands that need journal name
ARGUMENTS = [
("create", "create a new journal"),
("today", "edit today's entry"),
("delete", "delete specified journal"),
("build", "compile the specified journal"),
("view", "open the specified journal (OS dependent)"),
]
# for initializing flags
FLAGS = [
("list", "list all journals managed by walden"),
]
ARGUMENT_MAPPING = {
"build": build_journal,
"create": create_journal,
"delete": delete_journal,
"today": edit_journal,
#"view": view_journal
}
FLAG_MAPPING = {"list": list_journals}
def _parse_args() -> argparse.Namespace:
"""Create the arg parser from ARGUMENTS and return the parsed arguments"""
parser = argparse.ArgumentParser(description="edit and manage your walden journals")
ex_group = parser.add_mutually_exclusive_group(required=True)
for cmd, help_txt in ARGUMENTS:
ex_group.add_argument(
f"-{cmd[0]}",
f"--{cmd}",
type=str,
nargs=1,
help=help_txt,
metavar="JOURNAL_NAME",
)
for flag, help_txt in FLAGS:
ex_group.add_argument(
f"-{flag[0]}",
f"--{flag}",
action="store_true",
help=help_txt,
)
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return parser.parse_args()
def _create_walden_config(config_file_path: Path):
"""Write default configuration file at specified path"""
config = {
"walden": {
"config_path": str(config_file_path),
"default_journal_path": str(Path.home() / "journals"),
}
}
config_file_path.write_text(toml.dumps(config))
def _validate_config(config: dict):
"""ensure that required fields are in config"""
if not config.get("walden", {}).get("config_path"):
raise WaldenException("Missing 'config_path' in walden configuration")
if not config["walden"].get("default_journal_path"):
raise WaldenException("Missing 'default_journal_path' in walden configuration")
def _parse_walden_config(config: dict) -> WaldenConfiguration:
"""Parse raw configuration into a dataclass for easier access"""
config_path, default_journal_path = Path(config["config_path"]), Path(
config["default_journal_path"]
)
journal_info = {}
for journal_name, journal_path in config.items():
if journal_name == "config_path" or journal_name == "default_journal_path":
continue
journal_info[journal_name] = JournalConfiguration(
name=journal_name, path=Path(journal_path)
)
return WaldenConfiguration(
config_path=config_path,
default_journal_path=default_journal_path,
journals=journal_info,
)
def _get_config() -> WaldenConfiguration:
"""Create configuration if it doesn't exist and return an object representing the config"""
config_dir = Path.home() / ".config" / "walden"
config_dir.mkdir(parents=True, exist_ok=True)
# config file is stored as a toml
config_file_path = config_dir / "walden.conf"
if not config_file_path.exists():
_create_walden_config(config_file_path)
config = toml.load(config_file_path)
_validate_config(config)
return _parse_walden_config(config["walden"])
def main():
"""Parse arguments, fetch config, and route command to appropriate function"""
try:
args = _parse_args()
config = _get_config()
cmd, value = next(
(cmd, value) for cmd, value in vars(args).items() if value is not None
)
# check if command is a flag
if value is True:
sys.exit(FLAG_MAPPING[cmd](config))
if cmd in ["build", "delete", "view", "today"]:
# verify journal exists and is accessible
journal_name = value[0]
journal_info = config.journals.get(journal_name)
if not journal_info:
raise WaldenException(
f"'{journal_name}' not found! Please create a journal before attempting to access it."
)
journal_path = journal_info.path
if not journal_path.exists():
raise WaldenException(
f"Expected to find '{journal_name}' at {journal_path}, but found nothing!"
)
sys.exit(ARGUMENT_MAPPING[cmd](value, config))
except WaldenException as we:
print_error(we)
sys.exit(1)
except Exception as e:
raise e
sys.exit(1)
| 2.546875
| 3
|
auditor/auditor/config.py
|
ravirahman/sancus
| 2
|
12782343
|
from dataclasses import dataclass
from decimal import Decimal
from common.config import (
BTCProxyConfig,
GRPCServerConfig,
IPFSConfig,
SQLAlchemyConfig,
W3Config,
)
@dataclass(frozen=True)
class WebauthnConfig:
rp_name: str
rp_id: str
origin: str
@dataclass(frozen=True)
class AuditorConfig:
sqlalchemy_config: SQLAlchemyConfig
grpc_server_config: GRPCServerConfig
btc_proxy_config: BTCProxyConfig
webauthn_config: WebauthnConfig
w3_config: W3Config
audit_folder: str
ipfs_config: IPFSConfig
audit_smart_contract_address: str
acceptable_exchange_rate_epsilon: Decimal
| 2.09375
| 2
|
behind/chats/routing.py
|
teamsalad/behind-api
| 0
|
12782344
|
from django.urls import path
from chats import consumers
websocket_urlpatterns = [
path('ws/v1/chat_rooms/<int:id>/', consumers.ChatConsumer),
]
| 1.515625
| 2
|
pymanopt/manifolds/stiefel.py
|
paulroujansky/pymanopt
| 0
|
12782345
|
import numpy as np
from scipy.linalg import expm
from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold
from pymanopt.tools.multi import multiprod, multisym, multitransp
class Stiefel(EuclideanEmbeddedSubmanifold):
"""
Factory class for the Stiefel manifold. Instantiation requires the
dimensions n, p to be specified. Optional argument k allows the user to
optimize over the product of k Stiefels.
Elements are represented as n x p matrices (if k == 1), and as k x n x p
matrices if k > 1 (Note that this is different to manopt!).
"""
def __init__(self, n, p, k=1):
self._n = n
self._p = p
self._k = k
# Check that n is greater than or equal to p
if n < p or p < 1:
raise ValueError("Need n >= p >= 1. Values supplied were n = %d "
"and p = %d." % (n, p))
if k < 1:
raise ValueError("Need k >= 1. Value supplied was k = %d." % k)
if k == 1:
name = "Stiefel manifold St(%d, %d)" % (n, p)
elif k >= 2:
name = "Product Stiefel manifold St(%d, %d)^%d" % (n, p, k)
dimension = int(k * (n * p - p * (p + 1) / 2))
super().__init__(name, dimension)
@property
def typicaldist(self):
return np.sqrt(self._p * self._k)
def inner(self, X, G, H):
# Inner product (Riemannian metric) on the tangent space
# For the stiefel this is the Frobenius inner product.
return np.tensordot(G, H, axes=G.ndim)
def dist(self, X, Y):
raise NotImplementedError(
"The manifold '{:s}' currently provides no implementation of "
"the 'dist' method".format(self._get_class_name()))
def proj(self, X, U):
return U - multiprod(X, multisym(multiprod(multitransp(X), U)))
# TODO(nkoep): Implement the weingarten map instead.
def ehess2rhess(self, X, egrad, ehess, H):
XtG = multiprod(multitransp(X), egrad)
symXtG = multisym(XtG)
HsymXtG = multiprod(H, symXtG)
return self.proj(X, ehess - HsymXtG)
# Retract to the Stiefel using the qr decomposition of X + G.
def retr(self, X, G):
if self._k == 1:
# Calculate 'thin' qr decomposition of X + G
q, r = np.linalg.qr(X + G)
# Unflip any flipped signs
XNew = np.dot(q, np.diag(np.sign(np.sign(np.diag(r)) + 0.5)))
else:
XNew = X + G
for i in range(self._k):
q, r = np.linalg.qr(XNew[i])
XNew[i] = np.dot(
q, np.diag(np.sign(np.sign(np.diag(r)) + 0.5)))
return XNew
def norm(self, X, G):
# Norm on the tangent space of the Stiefel is simply the Euclidean
# norm.
return np.linalg.norm(G)
# Generate random Stiefel point using qr of random normally distributed
# matrix.
def rand(self):
if self._k == 1:
X = np.random.randn(self._n, self._p)
q, r = np.linalg.qr(X)
return q
X = np.zeros((self._k, self._n, self._p))
for i in range(self._k):
X[i], r = np.linalg.qr(np.random.randn(self._n, self._p))
return X
def randvec(self, X):
U = np.random.randn(*np.shape(X))
U = self.proj(X, U)
U = U / np.linalg.norm(U)
return U
def transp(self, x1, x2, d):
return self.proj(x2, d)
def exp(self, X, U):
# TODO: Simplify these expressions.
if self._k == 1:
W = expm(np.bmat([[X.T.dot(U), -U.T.dot(U)],
[np.eye(self._p), X.T.dot(U)]]))
Z = np.bmat([[expm(-X.T.dot(U))], [np.zeros((self._p, self._p))]])
Y = np.bmat([X, U]).dot(W).dot(Z)
else:
Y = np.zeros(np.shape(X))
for i in range(self._k):
W = expm(np.bmat([[X[i].T.dot(U[i]), -U[i].T.dot(U[i])],
[np.eye(self._p), X[i].T.dot(U[i])]]))
Z = np.bmat([[expm(-X[i].T.dot(U[i]))],
[np.zeros((self._p, self._p))]])
Y[i] = np.bmat([X[i], U[i]]).dot(W).dot(Z)
return Y
def zerovec(self, X):
if self._k == 1:
return np.zeros((self._n, self._p))
return np.zeros((self._k, self._n, self._p))
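# A small usage sketch: draw a random point on St(5, 2), pick a unit tangent
# vector, and retract a small step back onto the manifold; the retracted point
# should keep orthonormal columns.
if __name__ == "__main__":
    manifold = Stiefel(5, 2)
    X = manifold.rand()
    U = manifold.randvec(X)
    Y = manifold.retr(X, 0.1 * U)
    print(np.allclose(Y.T @ Y, np.eye(2)))  # expected: True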
| 2.828125
| 3
|
autograd_forward/convenience_wrappers.py
|
BB-UCL/autograd-forward
| 30
|
12782346
|
<reponame>BB-UCL/autograd-forward<filename>autograd_forward/convenience_wrappers.py
from __future__ import absolute_import
from autograd.convenience_wrappers import (attach_name_and_doc, safe_type,
cast_to_same_dtype, grad)
from autograd.convenience_wrappers import hessian_vector_product as ahvp
from autograd_forward.core import make_jvp
def forward_derivative(fun, argnum=0):
"""
Derivative of fun w.r.t. scalar argument argnum.
"""
@attach_name_and_doc(fun, argnum, 'Forward mode derivative')
def dervfun(*args, **kwargs):
args = list(args)
args[argnum] = safe_type(args[argnum])
jvp, start_node = make_jvp(fun, argnum)(*args, **kwargs)
ans, d = jvp(cast_to_same_dtype(1.0, args[argnum]))
return d
return dervfun
def hessian_vector_product(fun, argnum=0, method='rev-rev'):
"""Builds a function that returns the exact Hessian-vector product.
The returned function has arguments (*args, vector, **kwargs), and takes
roughly 4x as long to evaluate as the original function.
There are two methods available, specified by the 'method' parameter:
rev-rev (default) and fwd-rev. fwd-rev is faster and has lower memory
overhead but is incompatible with some primitives."""
if method == 'rev-rev':
return ahvp(fun, argnum)
elif method == 'fwd-rev':
return jacobian_vector_product(grad(fun, argnum), argnum)
else:
raise ValueError("{} is not a valid method for hessian_vector_product. "
"Valid methods are: 'rev-rev', 'fwd-rev'.".format(method))
def jacobian_vector_product(fun, argnum=0):
"""Builds a function that returns the exact Jacobian-vector product, that
is the Jacobian matrix right-multiplied by vector. The returned function
has arguments (*args, vector, **kwargs)."""
jvp = make_jvp(fun, argnum=argnum)
def jac_vec_prod(*args, **kwargs):
args, vector = args[:-1], args[-1]
return jvp(*args, **kwargs)[0](vector)[1]
return jac_vec_prod
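# A brief usage sketch of forward_derivative on a scalar function; it assumes
# autograd's numpy wrapper is importable, as elsewhere in this package.
if __name__ == "__main__":
    import autograd.numpy as anp
    def f(x):
        return anp.sin(x) * x
    df = forward_derivative(f)
    print(df(0.5))  # expected to be close to sin(0.5) + 0.5 * cos(0.5)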
| 2.53125
| 3
|
tests/cerami/datatype/translator/base_datatype_translator_test.py
|
gummybuns/dorm
| 0
|
12782347
|
from mock import patch, Mock
from tests.helpers.testbase import TestBase
from cerami.datatype import String
from cerami.datatype.translator import BaseDatatypeTranslator
class TestBaseDatatypeTranslator(TestBase):
def setUp(self):
self.dt = String()
self.translator = BaseDatatypeTranslator(self.dt)
def test_to_dynamodb_none(self):
"""it returns the NULL object when value is None"""
assert self.translator.to_dynamodb(None) == {'NULL': True}
def test_to_dynamodb(self):
"""it returns a dict
with the key the condition_type
and the value the result of resolve()
"""
with patch('cerami.datatype.translator.BaseDatatypeTranslator.format_for_dynamodb') as resolve:
resolve.return_value = "mocked"
res = self.translator.to_dynamodb('test')
assert res == {"S": "mocked"}
def test_to_cerami_null(self):
"""it returns None when mapped_dict is NULL"""
assert self.translator.to_cerami({'NULL': True}) is None
def test_to_cerami_calls_format_for_cerami(self):
"""calls format_for_cerami when the value is not NULL"""
self.translator.format_for_cerami = Mock()
self.translator.to_cerami({'S': 'test'})
self.translator.format_for_cerami.assert_called_with('test')
def test_format_for_cerami(self):
"""returns the value"""
assert self.translator.format_for_cerami('test') == 'test'
| 2.5625
| 3
|
irun/preprocessor.py
|
reizio/irun
| 0
|
12782348
|
<gh_stars>0
import io
import re
import token
import tokenize
from argparse import ArgumentParser, FileType
from dataclasses import dataclass
from irun.base import IRunException, Matchers
@dataclass
class PreprocessError(IRunException):
message: str
lineno: int
col_offset: int
end_lineno: int
end_col_offset: int
def register_tokens(token_dict):
def next_token_slot():
index = max(token.tok_name.keys(), default=0)
return index + 1
escaped_tokens = []
for name, value in token_dict.items():
slot = next_token_slot()
setattr(token, name, slot)
token.tok_name[slot] = name
token.EXACT_TOKEN_TYPES[value] = slot
escaped_tokens.append(re.escape(value))
tokenize.PseudoToken = tokenize.Whitespace + tokenize.group(
*escaped_tokens,
tokenize.PseudoExtras,
tokenize.Number,
tokenize.Funny,
tokenize.ContStr,
tokenize.Name,
)
register_tokens({"TRIPLE_STAR": "***", "DOLLAR": "$"})
# 1-to-1 token translations
TRANSLATION_SCHEMA = {
token.ELLIPSIS: (token.NAME, Matchers.MATCH_ONE),
token.TRIPLE_STAR: (token.NAME, Matchers.MATCH_ANY),
}
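# Each entry above is a bare (type, string) pair rather than a full TokenInfo;
# transpile() passes token[:2] to untokenize(), which only needs those two
# fields, so these placeholders can sit alongside real tokens in new_tokens.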
def _transpile_tokens(original_tokens):
new_tokens = []
cursor = 0
while cursor < len(original_tokens):
current_token = original_tokens[cursor]
if special_identifier := TRANSLATION_SCHEMA.get(current_token.exact_type):
new_tokens.append(special_identifier)
elif current_token.exact_type == token.DOLLAR:
# There should always be at least one more token (the stream ends with ENDMARKER), but guard anyway
if cursor + 1 == len(original_tokens):
raise PreprocessError("EOF", *current_token.start, *current_token.end)
next_token = original_tokens[cursor + 1]
if next_token.exact_type != token.NAME:
raise PreprocessError(
f"Expected a NAME token, got {token.tok_name[next_token.exact_type]}",
*current_token.start,
*current_token.end,
)
next_token = next_token._replace(
string=Matchers.MATCH_NAME.store(next_token.string)
)
new_tokens.append(next_token)
cursor += 1
else:
new_tokens.append(current_token)
cursor += 1
return new_tokens
def transpile(source):
source_buffer = io.StringIO(source)
token_iterator = tokenize.generate_tokens(source_buffer.readline)
new_tokens = _transpile_tokens(tuple(token_iterator))
return tokenize.untokenize(token[:2] for token in new_tokens)
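# For example, assuming Matchers.MATCH_ONE and MATCH_ANY render as plain
# identifier strings, transpile("f(..., ***)") would return the same call with
# the ellipsis and triple star replaced by those matcher names.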
def main(argv=None):
parser = ArgumentParser()
parser.add_argument("source", type=FileType())
options = parser.parse_args()
with options.source as stream:
print(transpile(stream.read()))
if __name__ == "__main__":
exit(main())
| 2.46875
| 2
|
application.py
|
gk2533/Python_Purple_Parrots
| 0
|
12782349
|
<reponame>gk2533/Python_Purple_Parrots
import uuid
from flask import Flask, request, jsonify
from flask_restplus import Resource, Api
from flask_restplus import fields
from flask_sqlalchemy import SQLAlchemy
import nltk.corpus
import nltk.tag
import nltk
import re
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
# Download the necessary NLTK data (tokenizer and POS tagger)
nltk.download('punkt', download_dir='/opt/python/current/app')
nltk.download('averaged_perceptron_tagger', download_dir='/opt/python/current/app')
nltk.data.path.append("/opt/python/current/app")
application = Flask(__name__)
api = Api(application)
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
db = SQLAlchemy(application)
message = api.model('message', {
'post a message here': fields.String(required=True, description='message post a message here'),
})
message_id = api.model('message_id', {
'id': fields.String(readOnly=True, description='unique identifier of a message'),
'post a message here': fields.String(required=True, description='message post a message here'),
})
class Message(db.Model):
id = db.Column(db.Text(80), primary_key=True)
content = db.Column(db.String(120), unique=False, nullable=False)
def __repr__(self):
return '<Message %r>' % self.content
def yodify(s): # takes in a string/sentence
h = nltk.word_tokenize(s) # split the sentence into a list of words
b = nltk.pos_tag(h) # tag every word with its part of speech (noun, verb, etc.)
# b is a list of tuples in the format ("word", "tag")
for item in b: # go through each tuple in the list
if item[1] == 'IN': # the Penn Treebank tag used by nltk for prepositions is 'IN'
word = re.search(item[0], s)
num = word.start()
return str(s[num:] + " " + s[:num])
if len(b) <= 4: # sentences with 4 or fewer words: move the last word to the front
return str(' '.join(h[-1:] + h[:len(b)-1]))
else:
return str(' '.join(h[-3:] + h[:-3])) # longer sentences with no preposition:
# move the last 3 words to the front
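# For instance, yodify("I will go to the store") takes the else branch (no word
# is tagged as a preposition) and returns "to the store I will go", assuming
# the default NLTK tokenizer and tagger.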
def dog(sentence): # replace every word in the sentence with "woof"
tokens = nltk.word_tokenize(sentence)
result = ''
for _ in tokens:
result += 'woof '
return result
def cookie(sentence): # "my"/"My"/"I" become "me", and "cookie" is inserted after every word
tokens = nltk.word_tokenize(sentence)
result = ''
for word in tokens:
if word in ('my', 'I', 'My'):
result += 'me cookie '
else:
result += word + ' cookie '
return result
def kermit(sentence): # turn every "commit" into "kermit" and every "Commit" into "Kermit"
tokens = nltk.word_tokenize(sentence)
result = ''
for word in tokens:
if word == 'commit':
result += 'kermit '
elif word == 'Commit':
result += 'Kermit '
else:
result += word + ' '
return result
def british(sentence): # talking like Daniel: swap common American spellings/terms for British ones
tokens = nltk.word_tokenize(sentence)
result = ''
for index in tokens:
if index == 'color':
result += 'colour '
elif index == 'favorite':
result += 'favourite '
elif index == 'labor':
result += 'labour '
elif index == 'tv':
result += 'telly '
elif index == 'line':
result += 'queue '
else:
result += index + ' '
result += 'mate'
return result
message_list = [] # holds the content of every message posted so far
def create_message(data): # builds and stores a new message; called by each POST handler below
id = str(uuid.uuid4())
content = data.get('post a message here')
message = Message(id=id, content=content)
message_list.append(content)
db.session.add(message)
db.session.commit()
return message
@api.route("/messageboard") # this get class returns all the messages
class MessageBoard(Resource):
def get(self):
return message_list
@api.route("/message/yoda")
class YodaMessage(Resource): # this is the yoda post class
@api.expect(message)
@api.marshal_with(message_id)
def post(self): # posts a message transformed by yodify, then stores it via create_message
result = {'post a message here': yodify(request.get_json().get('post a message here'))}
new_message = create_message(result)
return Message.query.filter(Message.id == new_message.id).one()
@api.route("/message/dog")
class DogMessage(Resource): # this is the dog post class
@api.expect(message)
@api.marshal_with(message_id)
def post(self): # this post method posts a message with dog
result = {'post a message here': dog(request.get_json().get('post a message here'))}
new_message = create_message(result)
return Message.query.filter(Message.id == new_message.id).one()
@api.route("/message/cookie")
class CookieMessage(Resource): # this is the cookie post class
@api.expect(message)
@api.marshal_with(message_id)
def post(self): # this post method posts a message with cookie
result = {'post a message here': cookie(request.get_json().get('post a message here'))}
new_message = create_message(result)
return Message.query.filter(Message.id == new_message.id).one()
@api.route("/message/kermit")
class KermitMessage(Resource): # this is the kermit post class
@api.expect(message)
@api.marshal_with(message_id)
def post(self): # this post method posts a message with kermit
result = {'post a message here': kermit(request.get_json().get('post a message here'))}
new_message = create_message(result)
return Message.query.filter(Message.id == new_message.id).one()
@api.route("/message/british") # this is the british post class
class BritishMessage(Resource):
@api.expect(message)
@api.marshal_with(message_id)
def post(self): # this post method posts a message with british
result = {'post a message here': british(request.get_json().get('post a message here'))}
new_message = create_message(result)
return Message.query.filter(Message.id == new_message.id).one()
@api.route("/message/<string:id>")
class MessageId(Resource):
@api.marshal_with(message_id)
def get(self, id):
return Message.query.filter(Message.id == id).one()
def configure_db():
db.create_all()
db.session.commit()
def get_app():
return application
def main():
configure_db()
application.debug = True
application.run()
if __name__ == "__main__":
main()
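# A rough sketch of exercising the API once the server is running on Flask's
# default host and port (adjust the URL for other setups):
# curl -X POST http://127.0.0.1:5000/message/yoda \
#      -H "Content-Type: application/json" \
#      -d '{"post a message here": "I will go to the store"}'
# curl http://127.0.0.1:5000/messageboard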
| 2.390625
| 2
|
py/g1/asyncs/kernels/g1/asyncs/kernels/pollers.py
|
clchiou/garage
| 3
|
12782350
|
<reponame>clchiou/garage
__all__ = [
'Poller',
'Polls',
# Poller implementations.
#
# TODO: Only epoll is supported as cross-platform is not priority.
'Epoll',
]
import enum
import errno
import math
import select
import threading
from typing import Sequence, Tuple, Union
from g1.bases.assertions import ASSERT
class Polls(enum.Enum):
"""Type of polls.
A task may either read or write a file, but never both at the same
time (at least I can't think of a use case of that).
"""
READ = enum.auto()
WRITE = enum.auto()
class Poller:
def close(self):
"""Close the poller."""
raise NotImplementedError
def notify_open(self, fd: int):
"""Add the given file descriptor to the poller."""
raise NotImplementedError
def notify_close(self, fd: int):
"""Remove the given file descriptor from the poller.
NOTE: This might be called in another thread.
"""
raise NotImplementedError
def poll(
self,
timeout: Union[float, None],
) -> Tuple[Sequence[int], Sequence[int]]:
"""Poll and return readable and writeable file descriptors.
NOTE: This could return extra file descriptors, like write-end
of pipes as readable file descriptors.
"""
raise NotImplementedError
class Epoll(Poller):
_EVENT_MASK = (
select.EPOLLIN | select.EPOLLOUT | select.EPOLLET | select.EPOLLRDHUP
)
# Add EPOLLHUP, EPOLLRDHUP, EPOLLERR to the mask. This should
# unblock all tasks whenever a file is readable or writeable, at the
# cost of (rare?) spurious wakeup or "extra" file descriptors.
_EVENT_IN = (
select.EPOLLIN | select.EPOLLHUP | select.EPOLLRDHUP | select.EPOLLERR
)
_EVENT_OUT = (
select.EPOLLOUT | select.EPOLLHUP | select.EPOLLRDHUP | select.EPOLLERR
)
def __init__(self):
self._lock = threading.Lock()
self._epoll = select.epoll()
self._closed_fds = set()
def close(self):
self._epoll.close()
def notify_open(self, fd):
ASSERT.false(self._epoll.closed)
try:
self._epoll.register(fd, self._EVENT_MASK)
except FileExistsError:
pass
def notify_close(self, fd):
ASSERT.false(self._epoll.closed)
with self._lock:
self._closed_fds.add(fd)
try:
self._epoll.unregister(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise
def poll(self, timeout):
ASSERT.false(self._epoll.closed)
with self._lock:
if self._closed_fds:
closed_fds, self._closed_fds = self._closed_fds, set()
return closed_fds, closed_fds
if timeout is None:
pass
elif timeout <= 0:
timeout = 0
else:
# epoll_wait() has a resolution of 1 millisecond.
timeout = math.ceil(timeout * 1e3) * 1e-3
can_read = []
can_write = []
# Since Python 3.5, poll retries with a re-computed timeout
# rather than raising InterruptedError (see PEP 475).
for fd, events in self._epoll.poll(timeout=timeout):
if events & self._EVENT_IN:
can_read.append(fd)
if events & self._EVENT_OUT:
can_write.append(fd)
return can_read, can_write
| 2.421875
| 2
|