repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
|---|---|---|
nishantkshyp2004/backend-coding-challenge-django
|
app/note_taking/serializers.py
|
<filename>app/note_taking/serializers.py
from rest_framework import serializers
from note_taking.models import NoteTaking
from django.contrib.auth.models import User
class NoteTakingSerializer(serializers.ModelSerializer):
    """
    NoteTaking Serializer class.

    Serializes ``NoteTaking`` instances.  ``owner`` is exposed read-only as
    the owning user's username, so clients cannot set it; the view supplies
    the owner at save time.
    """
    # Read-only: rendered from the related User's username.
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = NoteTaking
        fields = ['owner', 'title', 'body', 'tags', 'visibility']
class UserSerializer(serializers.ModelSerializer):
    """
    User Serializer class.

    Exposes ``url``, ``id`` and ``username``; ``password`` is accepted on
    input only (write_only) and never rendered back to the client.
    """

    class Meta:
        model = User
        fields = ['url', 'id', 'username', 'password']
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        """
        Create a ``User`` with a hashed password.

        :param validated_data: dict of validated serializer fields.
        :return: the saved ``User`` instance.
        """
        password = validated_data.pop('password')
        user = User(**validated_data)
        # set_password stores a hash instead of the raw password.
        user.set_password(password)
        user.save()
        return user
|
nishantkshyp2004/backend-coding-challenge-django
|
app/note_taking/models.py
|
<reponame>nishantkshyp2004/backend-coding-challenge-django
from django.db import models
# Tag options for a note; the stored value and the display label are the
# same string for every choice.
_TAG_NAMES = ("office", "shopping", "holiday", "study",
              "personal", "friends", "home", "family")
TAGS_CHOICES = [(tag, tag) for tag in _TAG_NAMES]

# Visibility options controlling who may read a note.
VISIBILITY_CHOICES = [(vis, vis) for vis in ("public", "private")]
class NoteTaking(models.Model):
    """
    NoteTaking Model class.

    A single note with a title, free-form body, one tag and a visibility
    flag, owned by a Django auth user.
    """
    # Creation timestamp, set once on first save.
    created = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100, blank=True, default='')
    body = models.TextField()
    # Single tag chosen from TAGS_CHOICES.
    tags = models.CharField(choices=TAGS_CHOICES, default='personal',
                            max_length=100)
    # 'public' notes are listed for anonymous users (see the viewset/permissions).
    visibility = models.CharField(choices=VISIBILITY_CHOICES, default='public',
                                  max_length=100)
    # Deleting the user cascades to their notes.
    owner = models.ForeignKey('auth.User', related_name='note_taking',
                              on_delete=models.CASCADE)

    class Meta:
        # Oldest first.
        ordering = ['created']
|
nishantkshyp2004/backend-coding-challenge-django
|
app/note_taking/permissions.py
|
<filename>app/note_taking/permissions.py
from rest_framework import permissions
class NoteTakingPermissions(permissions.BasePermission):
    """
    Permission policy for NoteTaking endpoints.

    Reads are always allowed; writes require an authenticated user, and
    object-level writes additionally require ownership.
    """

    def has_permission(self, request, view):
        """
        Allow SAFE_METHODS ('GET', 'HEAD', 'OPTIONS') unconditionally;
        any other method requires an authenticated user.

        :param request: Request object
        :param view: View
        :return: Boolean (True or False)
        """
        if request.method in permissions.SAFE_METHODS:
            return True
        return bool(request.user and request.user.is_authenticated)

    def has_object_permission(self, request, view, obj):
        """
        Object-level check applied to every request kind (get, post, put,
        delete, head or options).

        :param request: Request object.
        :param view: view.
        :param obj: Object
        :return: Boolean (True or False)
        """
        # Reads are always permitted.
        if request.method in permissions.SAFE_METHODS:
            return True
        # A public note owned by someone else may be rendered but never amended.
        if obj.visibility == "public" and obj.owner != request.user:
            return False
        # Writes are reserved for the owner.
        return obj.owner == request.user
class UserPermissions(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_permission(self, request, view):
        # Reads are open; writes require authentication.
        if request.method in permissions.SAFE_METHODS:
            return True
        return bool(request.user and request.user.is_authenticated)

    def has_object_permission(self, request, view, obj):
        # GET, HEAD and OPTIONS are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        # A user may only modify their own account object.
        return obj == request.user
|
nishantkshyp2004/backend-coding-challenge-django
|
app/note_taking/filters.py
|
from rest_framework import filters
class CustomSearchFilter(filters.SearchFilter):
    """
    Search filter that restricts the search to the ``tags`` field when the
    request carries a truthy ``tags`` query parameter, and otherwise defers
    to the default SearchFilter field list.
    """

    def get_search_fields(self, view, request):
        wants_tags = request.query_params.get('tags')
        if wants_tags:
            return ['tags']
        return super().get_search_fields(view, request)
|
nishantkshyp2004/backend-coding-challenge-django
|
app/note_taking/migrations/0001_initial.py
|
# Generated by Django 4.0 on 2021-12-19 20:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates the NoteTaking table (generated by Django 4.0)."""

    initial = True

    # Requires the auth app so the owner FK can target auth.User.
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='NoteTaking',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('title', models.CharField(blank=True, default='', max_length=100)),
                ('body', models.TextField()),
                ('tags', models.CharField(choices=[('office', 'office'), ('shopping', 'shopping'), ('holiday', 'holiday'), ('study', 'study'), ('personal', 'personal'), ('friends', 'friends'), ('home', 'home'), ('family', 'family')], default='personal', max_length=100)),
                ('visibility', models.CharField(choices=[('public', 'public'), ('private', 'private')], default='public', max_length=100)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='note_taking', to='auth.user')),
            ],
            options={
                'ordering': ['created'],
            },
        ),
    ]
|
nishantkshyp2004/backend-coding-challenge-django
|
app/note_taking/views.py
|
<filename>app/note_taking/views.py
from note_taking.models import NoteTaking
from note_taking.serializers import NoteTakingSerializer, UserSerializer
from django.contrib.auth.models import User
from note_taking.permissions import NoteTakingPermissions, UserPermissions
from note_taking.filters import CustomSearchFilter
from rest_framework import viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
class NoteTakingViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.
    """
    queryset = NoteTaking.objects.all()
    serializer_class = NoteTakingSerializer
    permission_classes = [NoteTakingPermissions]
    filter_backends = [CustomSearchFilter]
    search_fields = ['title', 'body']

    def perform_create(self, serializer):
        """
        Attach the requesting user as the note's owner before saving.

        :param serializer: NoteTaking serializer
        :return: None
        """
        serializer.save(owner=self.request.user)

    def get_queryset(self):
        """
        Authenticated users see every note; anonymous users only see
        notes whose visibility is 'public'.
        """
        if self.request.user.is_authenticated:
            return self.queryset
        return self.queryset.filter(visibility='public')
class UserViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions for user.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer
    # Object-level writes are limited to the account owner.
    permission_classes = [UserPermissions]
@api_view(['GET'])
def api_root(request, format=None):
    """Browsable-API entry point: links to the user and note collections."""
    links = {
        'users': reverse('user-list', request=request, format=format),
        'notetaking': reverse('notetaking-list', request=request,
                              format=format),
    }
    return Response(links)
|
danielsela42/rotsedatamodel
|
py/rotsedatamodel/io/nptools.py
|
<filename>py/rotsedatamodel/io/nptools.py
'''
Created on Dec 19, 2017
@author: Daniel
'''
import numpy as np
def case_insensative_recarray(dtype):
    ''' for single named fields, create a tuple of the lowercase and uppercase versions.

    Args:
        dtype: np data type
    Process:
        Walks dtype.descr; every entry whose name is a plain string has the
        name replaced by a (lower, upper) pair, so the field can be accessed
        under either spelling.
    Returns:
        New dtype with a new naming convention.
    '''
    converted = []
    for entry in dtype.descr:
        field_name = entry[0]
        if isinstance(field_name, str):
            entry = ((field_name.lower(), field_name.upper()),) + tuple(entry[1:])
        converted.append(entry)
    return np.dtype(converted)
def add_recarray_field(recarray, narr):
    ''' append new field in a recarray

    Args:
        recarray : numpy recarray
        narr: A list of tuples (name, array)
            name : (str) name of field to be appended
            array : numpy array or recarray to be appended
    Process:
        Builds dtype entries for the new fields, allocates an empty array
        holding both old and new fields (all reachable under lower and
        upper-case names) and copies everything across.
    Returns:
        numpy recarray appended
    '''
    # dtype entries for the appended fields; recarray-valued fields get the
    # same case-insensitive naming treatment as the base.
    extra = []
    for name, array in narr:
        data = np.asarray(array)
        field_dtype = data.dtype
        if isinstance(array, np.recarray):
            field_dtype = case_insensative_recarray(field_dtype)
        extra.append(((name.lower(), name.upper()), field_dtype, data.shape))
    # base recarray also gains both lower and upper names
    base = case_insensative_recarray(recarray.dtype)
    merged_dtype = np.dtype(base.descr + extra)
    merged = np.empty(recarray.shape, dtype=merged_dtype)
    # copy the original fields across
    for field in recarray.dtype.fields:
        merged[field] = recarray[field]
    # then fill in the appended fields
    for name, array in narr:
        try:
            data = np.asarray(array)
            merged[name] = data
        except Exception as e:
            raise RuntimeError("Failed to append {}; shape: {}; dtype: {}.".format(name, data.shape, merged_dtype)) from e
    return merged
def create_struct(arrays):
    ''' combines arrays into recarray.

    Args:
        arrays: non-empty list of (name, ndarray) tuples; each entry becomes
            a field whose dtype and shape mirror the given array.
    Returns:
        A structured ndarray shaped like the first array, where each field
        is filled by broadcasting the corresponding source array.
    Raises:
        ValueError: if ``arrays`` is empty.
    '''
    # explicit check instead of an opaque IndexError on arrays[0]
    if not arrays:
        raise ValueError("arrays must contain at least one (name, array) pair")
    # one field per input array, keeping its dtype and full shape
    fields = [(name, array.dtype, array.shape) for name, array in arrays]
    rec = np.empty(arrays[0][1].shape, dtype=np.dtype(fields))
    # broadcast each source array into its field
    for field, array in arrays:
        rec[field] = array
    return rec
if __name__ == '__main__':
    # Smoke test: append two pair-valued fields to a small structured array.
    x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
    y = [('a', (20, 10)), ('b', (30, 50))]
    z = add_recarray_field(x, y)
    print(z.dtype, z)
    # Combine plain ndarrays into a single structured array.
    shape = (3, 4, 5)
    a = np.zeros(shape, dtype=np.float32)
    b = np.zeros(shape, dtype=np.float32)
    ab_list = [('a', a), ('b', b)]
    ab = create_struct(ab_list)
    print(ab)
    c = np.zeros(shape, dtype=np.int32)
    abc_list = ab_list + [('c', c)]
    abc = create_struct(abc_list)
    d = 100  # NOTE(review): assigned but never used below
    # Manually built dtype mirroring what create_struct produces, plus a scalar field.
    new = np.empty(shape, dtype=[('a', 'f4', shape), ('b', 'f4', shape), ('c', 'i4', shape), ('d', 'i4', 1)])
    print(new)
    print(new.dtype)
|
danielsela42/rotsedatamodel
|
py/rotsedatamodel/tests/test_m2f.py
|
<reponame>danielsela42/rotsedatamodel
'''
Created on Dec 13, 2017
@author: arnon
'''
import unittest as ut
import numpy as np
from rotsedatamodel.match2fits import multimatch2fits, getmatch
from rotseutil import readfits
def remove_spaces(s):
    # Strip leading/trailing whitespace only (interior spaces are kept).
    # Used to normalise blank-padded FITS string values before comparison.
    return s.strip()
def compare_recarray(rec1, rec2):
    ''' compares 2 recarrays by looking at their individual fields
    Args:
        rec1, rec2: 2 recarrays to compare
    Process:
        Filters fields by using uppercase characters in the name.
        Examining the shape to identify if one level deeper needs to be compared in rec1 or rec2.
        Special text fields are stripped from spaces, since once built into a FITS column, spaces are removed.
        Adds each field that has different contents to a list.
    Returns:
        A list of the fields with different contents between rec1 and rec2.
    '''
    different = []
    # keep only names that are entirely uppercase (skips the lowercase
    # aliases introduced by case_insensative_recarray)
    filtered_fields = filter(lambda x: x.upper() == x, rec1.dtype.fields.keys())
    for field in sorted(filtered_fields):
        recfield1 = rec1[field]
        recfield2 = rec2[field]
        if isinstance(recfield1[0], np.recarray):
            # nested recarray: recurse one level down on the first element
            diff = compare_recarray(recfield1[0], recfield2[0])
            different += diff
        else:
            # text columns that are blank-padded in FITS; strip before comparing
            specialfields = ['CAM_ID', 'CUNIT1', 'CUNIT2', 'OBSTYPE']
            recindex1 = recfield1
            recindex2 = recfield2
            if isinstance(recfield1[0], np.ndarray):
                recindex1 = recfield1[0]
            vfunct = np.vectorize(remove_spaces)
            if field in specialfields:
                recindex1 = vfunct(recindex1)
            # NOTE(review): when shapes differ, rec2's FIRST element
            # (recfield2[0]) is compared while recindex2 is never updated the
            # same way recindex1 is -- looks intentional for the match-vs-FITS
            # layout difference, but confirm.
            if recindex1.shape != recindex2.shape:
                if not np.array_equal(recindex1, recfield2[0]):
                    different += [field]
            else:
                if not np.array_equal(recindex1, recfield2):
                    different += [field]
    return different
class TestM2F(ut.TestCase):
    """Round-trip test: match file -> FITS -> memory, compared field by field."""

    def columns(self):
        # NOTE(review): not prefixed with 'test_', so unittest/pytest never
        # run this method; rename to test_columns if it is meant to execute.
        self.assertTrue('FOO'.isupper())

    def test_match2fits_dat(self):
        ''' run match2fits on a match file.
        read the match file into memory.
        read fits file into memory.
        compare the two memory structures.
        '''
        match_file = '../dat/000409_xtetrans_1a_match.dat'
        # These 3 lines: 73-75 as funct.
        fits_file = multimatch2fits(match_file)
        match = getmatch(match_file)
        fits = readfits(fits_file[0])
        diff = compare_recarray(match, fits)
        # any differing field name is reported in the failure message
        self.assertTrue(len(diff) == 0, 'Failed in field: {}'.format(diff))
if __name__ == '__main__':
    # allow running this test module directly
    ut.main()
|
danielsela42/rotsedatamodel
|
py/rotsedatamodel/io/fitstools.py
|
'''
Created on Dec 20, 2017
@author: daniel
'''
from .nptools import add_recarray_field
from astropy.io import fits as pyfits
def readfits(filepath):
    ''' creates a numpy based structure from a FITS file generated by match2fits.

    Args:
        filepath: path to FITS file.
    Process:
        Opens the FITS file, takes the match table from HDU 1 and appends the
        STAT (HDU 2) and MAP (HDU 3) tables to it as extra fields.
    Returns:
        A numpy recarray.
    '''
    with pyfits.open(filepath, memmap=True) as hdus:
        extras = [('STAT', hdus[2].data), ('MAP', hdus[3].data)]
        combined = add_recarray_field(hdus[1].data, extras)
    return combined
if __name__ == '__main__':
    import os
    # locate <project>/dat relative to this file: io/ -> rotsedatamodel/ -> py/
    heredir = os.path.dirname(os.path.abspath(__file__))
    basedir = os.path.dirname(heredir)
    projdir = os.path.dirname(basedir)
    datdir = os.path.join(projdir, 'dat')
    filename = '000409_xtetrans_1a_match.fit'
    fitsfile = os.path.join(datdir, filename)
    fits = readfits(fitsfile)
    print(fits.dtype)
    # kept (as an inert string) for manual exploration of the FITS layout
    '''
    from concepts.mappingfits import elements
    elements(fitsfile)
    print(pyfits.info(fitsfile))
    '''
|
future-standard/Counting-ICCV-DSSINet
|
models/CRFVGG.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.network import Conv2d, FC, Conv2d_dilated, np_to_variable
from src.vgg import vgg16
import numpy as np
from torch.autograd import Variable
import logging
## CRFFeatureRF
class MessagePassing(nn.Module):
    """Iterative message passing between feature branches.

    For every ordered branch pair (j -> i) a 1x1 convolution maps branch j's
    channels onto branch i's; each branch is then updated as
    relu(own_state + prelu(sum of incoming messages)), repeated
    ``self.iters`` times.

    :param branch_n: number of branches exchanging messages.
    :param input_ncs: per-branch channel counts, indexed like the branches.
    :param bn: whether the message convolutions use batch norm.
    """
    def __init__(self, branch_n, input_ncs, bn=False):
        super(MessagePassing, self).__init__()
        self.branch_n = branch_n
        self.iters = 2
        # one 1x1 conv per ordered pair (j -> i), registered as w_0_{j}_{i}_0
        for i in range(branch_n):
            for j in range(branch_n):
                if i == j:
                    continue
                setattr(self, "w_0_{}_{}_0".format(j, i), \
                        nn.Sequential(
                            Conv2d_dilated(input_ncs[j], input_ncs[i], 1, dilation=1, same_padding=True, NL=None, bn=bn),
                        )
                        )
        self.relu = nn.ReLU(inplace=False)
        self.prelu = nn.PReLU()

    def forward(self, input):
        """
        :param input: list of branch feature maps (length branch_n).
        :return: list of refined feature maps, same lengths/channels.
        """
        hidden_state = input
        side_state = []  # NOTE(review): never used -- dead variable
        for _ in range(self.iters):
            hidden_state_new = []
            for i in range(self.branch_n):
                # unary term: the branch's own current state
                unary = hidden_state[i]
                binary = None
                # binary term: accumulated messages from every other branch
                for j in range(self.branch_n):
                    if i == j:
                        continue
                    if binary is None:
                        binary = getattr(self, 'w_0_{}_{}_0'.format(j, i))(hidden_state[j])
                    else:
                        binary = binary + getattr(self, 'w_0_{}_{}_0'.format(j, i))(hidden_state[j])
                binary = self.prelu(binary)
                hidden_state_new += [self.relu(unary + binary)]
            hidden_state = hidden_state_new
        return hidden_state
class CRFVGG(nn.Module):
    """Multi-scale VGG16-based density estimator with message passing.

    The input is processed at three scales (2x, 1x, 0.5x) by a shared VGG16
    front end; features from adjacent VGG stages across scales exchange
    messages (passing1..4), are concatenated and decoded into per-level
    density maps that are fused coarse-to-fine into a single output map.
    """
    def __init__(self, output_stride=8, bn=False):
        super(CRFVGG, self).__init__()
        self.output_stride = output_stride
        # scale factors applied to the input to build the image pyramid
        self.pyramid = [2, 0.5]
        self.front_end = vgg16(struct='F', NL="prelu", output_stride=self.output_stride)
        # message passing between branches with the listed channel counts
        self.passing1 = MessagePassing(branch_n=2,
                                       input_ncs=[128, 64],
                                       )
        self.passing2 = MessagePassing(branch_n=3,
                                       input_ncs=[256, 128, 64],
                                       )
        self.passing3 = MessagePassing(branch_n=3,
                                       input_ncs=[512, 256, 128]
                                       )
        self.passing4 = MessagePassing(branch_n=2,
                                       input_ncs=[512, 256]
                                       )
        # each decoder maps a (concatenated) feature bundle to a 1-channel density
        self.decoder1 = nn.Sequential(
            Conv2d_dilated(512, 128, 1, dilation=1, same_padding=True, NL='relu', bn=bn),
            Conv2d_dilated(128, 1, 3, dilation=1, same_padding=True, NL=None, bn=bn),
        )
        self.decoder2 = nn.Sequential(
            Conv2d_dilated(768, 128, 1, dilation=1, same_padding=True, NL='relu', bn=bn),
            Conv2d_dilated(128, 1, 3, dilation=1, same_padding=True, NL=None, bn=bn),
        )
        self.decoder3 = nn.Sequential(
            Conv2d_dilated(896, 128, 1, dilation=1, same_padding=True, NL='relu', bn=bn),
            Conv2d_dilated(128, 1, 3, dilation=1, same_padding=True, NL=None, bn=bn),
        )
        self.decoder4 = nn.Sequential(
            Conv2d_dilated(448, 128, 1, dilation=1, same_padding=True, NL='relu', bn=bn),
            Conv2d_dilated(128, 1, 3, dilation=1, same_padding=True, NL=None, bn=bn),
        )
        self.decoder5 = nn.Sequential(
            Conv2d_dilated(192, 128, 1, dilation=1, same_padding=True, NL='relu', bn=bn),
            Conv2d_dilated(128, 1, 3, dilation=1, same_padding=True, NL=None, bn=bn),
        )
        # 3x3 convs smoothing an upsampled coarser density before fusion
        self.passing_weight1 = Conv2d_dilated(1, 1, 3, same_padding=True, NL=None, bn=bn)
        self.passing_weight2 = Conv2d_dilated(1, 1, 3, same_padding=True, NL=None, bn=bn)
        self.passing_weight3 = Conv2d_dilated(1, 1, 3, same_padding=True, NL=None, bn=bn)
        self.passing_weight4 = Conv2d_dilated(1, 1, 3, same_padding=True, NL=None, bn=bn)
        self.prelu = nn.PReLU()
        self.relu = nn.ReLU()

    def forward(self, im_data, return_feature=False):
        """Compute the fused density map for a batch of images.

        :param im_data: (B, C, H, W) input batch.
        :param return_feature: NOTE(review): accepted but never used here.
        :return: the finest fused density map tensor.
        """
        # (start, end) layer names of each VGG stage; the *_na variants end
        # before the stage's final activation ("no activation")
        conv1_2 = ['0', 'relu3']
        conv1_2_na = ['0', '2']
        conv2_2 = ['4', 'relu8']
        conv2_2_na = ['4', '7']
        conv3_3 = ['9', 'relu15']
        conv3_3_na = ['9', '14']
        # layer 16 is the max pooling layer
        conv4_3 = ['16', 'relu22']
        conv4_3_na = ['16', '21']
        # droping the last pooling layer, 17 would become dilated with rate 2
        # conv4_3 = ['17', 'relu22']
        batch_size, C, H, W = im_data.shape
        # build the 3-level image pyramid; no gradients through the resizes
        with torch.no_grad():
            im_scale1 = nn.functional.upsample(im_data, size=(int(H * self.pyramid[0]), int(W * self.pyramid[0])), align_corners=False, mode="bilinear")
            im_scale2 = im_data
            im_scale3 = nn.functional.upsample(im_data, size=(int(H * self.pyramid[1]), int(W * self.pyramid[1])), align_corners=False, mode="bilinear")
        mp_scale1_feature_conv2_na = self.front_end.features.sub_forward(conv1_2[0], conv2_2_na[1])(im_scale1)
        mp_scale2_feature_conv1_na = self.front_end.features.sub_forward(*conv1_2_na)(im_scale2)
        # pass messages between the two available branches, then aggregate
        mp_scale1_feature_conv2, mp_scale2_feature_conv1 \
            = self.passing1([mp_scale1_feature_conv2_na, mp_scale2_feature_conv1_na])
        aggregation4 = torch.cat([mp_scale1_feature_conv2, mp_scale2_feature_conv1], dim=1)
        mp_scale1_feature_conv3_na = self.front_end.features.sub_forward(*conv3_3_na)(mp_scale1_feature_conv2)
        mp_scale2_feature_conv2_na = self.front_end.features.sub_forward(*conv2_2_na)(mp_scale2_feature_conv1)
        mp_scale3_feature_conv1_na = self.front_end.features.sub_forward(*conv1_2_na)(im_scale3)
        mp_scale1_feature_conv3, mp_scale2_feature_conv2, mp_scale3_feature_conv1 \
            = self.passing2([mp_scale1_feature_conv3_na, mp_scale2_feature_conv2_na, mp_scale3_feature_conv1_na])
        aggregation3 = torch.cat([mp_scale1_feature_conv3, mp_scale2_feature_conv2, mp_scale3_feature_conv1], dim=1)
        mp_scale1_feature_conv4_na = self.front_end.features.sub_forward(*conv4_3_na)(mp_scale1_feature_conv3)
        mp_scale2_feature_conv3_na = self.front_end.features.sub_forward(*conv3_3_na)(mp_scale2_feature_conv2)
        mp_scale3_feature_conv2_na = self.front_end.features.sub_forward(*conv2_2_na)(mp_scale3_feature_conv1)
        mp_scale1_feature_conv4, mp_scale2_feature_conv3, mp_scale3_feature_conv2 \
            = self.passing3([mp_scale1_feature_conv4_na, mp_scale2_feature_conv3_na, mp_scale3_feature_conv2_na])
        aggregation2 = torch.cat([mp_scale1_feature_conv4, mp_scale2_feature_conv3, mp_scale3_feature_conv2], dim=1)
        mp_scale2_feature_conv4_na = self.front_end.features.sub_forward(*conv4_3_na)(mp_scale2_feature_conv3)
        mp_scale3_feature_conv3_na = self.front_end.features.sub_forward(*conv3_3_na)(mp_scale3_feature_conv2)
        mp_scale2_feature_conv4, mp_scale3_feature_conv3 \
            = self.passing4([mp_scale2_feature_conv4_na, mp_scale3_feature_conv3_na])
        aggregation1 = torch.cat([mp_scale2_feature_conv4, mp_scale3_feature_conv3], dim=1)
        mp_scale3_feature_conv4 = self.front_end.features.sub_forward(*conv4_3)(mp_scale3_feature_conv3)
        # decode each aggregation level into a density map
        dens1 = self.decoder1(mp_scale3_feature_conv4)
        dens2 = self.decoder2(aggregation1)
        dens3 = self.decoder3(aggregation2)
        dens4 = self.decoder4(aggregation3)
        dens5 = self.decoder5(aggregation4)
        # coarse-to-fine fusion: upsample the coarser map, smooth, then add
        dens1 = self.prelu(dens1)
        dens2 = self.prelu(dens2 + self.passing_weight1(nn.functional.upsample(dens1, scale_factor=2, align_corners=False, mode="bilinear")))
        dens3 = self.prelu(dens3 + self.passing_weight2(nn.functional.upsample(dens2, scale_factor=2, align_corners=False, mode="bilinear")))
        dens4 = self.prelu(dens4 + self.passing_weight3(nn.functional.upsample(dens3, scale_factor=2, align_corners=False, mode="bilinear")))
        dens5 = self.relu(dens5 + self.passing_weight4(nn.functional.upsample(dens4, scale_factor=2, align_corners=False, mode="bilinear")))
        return dens5
|
future-standard/Counting-ICCV-DSSINet
|
src/RawLoader.py
|
import numpy as np
from scipy.ndimage import imread
import os
import random
import sys
import itertools
from density_gen import Gauss2D, read_image_label_fix, read_image_label_apdaptive, \
read_image_label_3d, read_image, save_density_map, get_annoted_kneighbors
import copy
import re
from collections import namedtuple
from src.timer import Timer
# Default density-map generation settings, keyed by labelling mode.
basic_config = {
    'fixed': {"sigma": 4.0, "f_sz": 15.0, "channels": 3, "downsize": 32},
    'adaptive': {"K": 4, "channels": 3, "downsize": 32},
    '3d': {"K": 4, "S": [9, 25, 49, 81], "channels": 3, "downsize": 32},
    'unlabel': {"channels": 3, "downsize": 32},
}
# Dispatch table: labelling mode -> reader building (img, density, count).
mode_func = {
    'fixed': read_image_label_fix,
    'adaptive': read_image_label_apdaptive,
    '3d': read_image_label_3d,
    'unlabel': read_image
}

# One loaded sample: image, density map and ground-truth count.
Blob = namedtuple('Blob', ('img', 'den', 'gt_count'))
class ImageDataLoader():
    """Loads (image, density, count) blobs for a crowd-counting dataset.

    Pairs image files with label files by the digit groups in their names,
    optionally pre-loading every sample into memory.
    NOTE: Python 2 module (print statements, cmp-based sort).
    """
    def __init__(self, image_path, label_path, mode, is_preload=True, split=None, annReadFunc=None, **kwargs):
        self.image_path = image_path
        self.label_path = label_path
        self.image_files = [filename for filename in os.listdir(image_path) \
                            if os.path.isfile(os.path.join(image_path,filename))]
        self.label_files = [filename for filename in os.listdir(label_path) \
                            if os.path.isfile(os.path.join(label_path,filename))]
        # sort both lists by the joined digit groups of the file names
        self.image_files.sort(cmp=lambda x, y: cmp('_'.join(re.findall(r'\d+',x)),'_'.join(re.findall(r'\d+',y))))
        self.label_files.sort(cmp=lambda x, y: cmp('_'.join(re.findall(r'\d+',x)),'_'.join(re.findall(r'\d+',y))))
        # every image must line up with the label carrying the same digits
        for img, lab in zip(self.image_files, self.label_files):
            assert '_'.join(re.findall(r'\d+', img)) == '_'.join(re.findall(r'\d+',lab))
        # optional train/val split callable applied to both lists
        if split != None:
            self.image_files = split(self.image_files)
            self.label_files = split(self.label_files)
        self.num_samples = len(self.image_files)
        self.mode = mode
        self.annReadFunc = annReadFunc
        self.blob_list = []
        self.fspecial = Gauss2D()  # shared Gaussian-kernel cache
        self.is_preload = is_preload
        self.read_func_kwargs = kwargs
        if 'test' in kwargs.keys():
            self.test = kwargs['test']
        else:
            self.test = False
        if self.mode == 'adaptive':
            # per-sample K-NN spacing is expensive; compute it once up front
            self.precompute_scale()
            print("K neighbors for adaptive density map Done.")
        if self.is_preload:
            self.preload_data()

    def preload_data(self):
        # Load every sample into blob_list.
        # NOTE(review): is_preload is toggled off while loading -- presumably
        # so __index__ falls back to load_index during this phase; confirm.
        print 'Pre-loading the data. This may take a while...'
        t = Timer()
        t.tic()
        self.blob_list = [_ for _ in range(self.num_samples)]
        self.is_preload = False
        for i in range(self.num_samples):
            self.blob_list[i] = (self.load_index(i))
            if i % 50 == 0:
                print "loaded {}/{} samples".format(i, self.num_samples)
        duration = t.toc(average=False)
        print 'Completed loading ' ,len(self.blob_list), ' files, time: ', duration
        self.is_preload = True

    def precompute_scale(self):
        # K-nearest-neighbour indices per label file (None for tiny files)
        self.kneighbors = []
        for i in range(self.num_samples):
            neighbors = get_annoted_kneighbors(self.label_files[i], self.label_path, \
                                               K=self.read_func_kwargs['K'], annReadFunc=self.annReadFunc)
            self.kneighbors += [neighbors]

    def load_index(self, i):
        # Read sample i from disk using the reader registered for self.mode.
        image_file, label_file = self.image_files[i], self.label_files[i]
        if self.mode != 'adaptive':
            img, den, gt_count = mode_func[self.mode](image_file, label_file, self.image_path, self.label_path, \
                                                      self.fspecial.get, annReadFunc=self.annReadFunc, **self.read_func_kwargs)
        else:
            # adaptive mode additionally needs the precomputed neighbours
            img, den, gt_count = mode_func[self.mode](image_file, label_file, self.image_path, self.label_path, \
                                                      self.fspecial.get, annReadFunc=self.annReadFunc, kneighbors=self.kneighbors[i], \
                                                      **self.read_func_kwargs)
        return Blob(img, den, gt_count)

    def query_fname(self, i):
        # image file name of sample i
        return self.image_files[i]

    def __getitem__(self, i):
        return self.__index__(i)

    def __index__(self, i):
        # serve from the preloaded cache when available, else read from disk
        if self.is_preload:
            blob = self.blob_list[i]
            return blob
        else:
            return self.load_index(i)

    def __iter__(self):
        for i in range(self.num_samples):
            yield self.__index__(i)

    def get_num_samples(self):
        return self.num_samples

    def __len__(self):
        return self.num_samples
|
future-standard/Counting-ICCV-DSSINet
|
src/density_gen.py
|
import cv2
from scipy.ndimage import imread
import numpy as np
import os
from sklearn.neighbors import NearestNeighbors
import collections
from itertools import repeat
import scipy.io as scio
from PIL import Image
def save_density_map(density_map, output_dir, fname='results.png'):
    """Normalise a density map to 0-255, colourise it (JET) and save as PNG."""
    lo, hi = np.min(density_map), np.max(density_map)
    scaled = 255.0 * (density_map - lo + 1e-10) / (1e-10 + hi - lo)
    scaled = scaled.squeeze()
    gray3 = scaled[:, :, np.newaxis].astype(np.uint8).repeat(3, axis=2)
    color_map = cv2.applyColorMap(gray3, cv2.COLORMAP_JET)
    cv2.imwrite(os.path.join(output_dir, fname), color_map)
def save_image(data, output_dir, fname='results.png'):
    """Write an image array to ``output_dir/fname``.

    Grayscale (2-D after squeeze) input is replicated to 3 channels;
    3-channel input is treated as RGB and flipped to BGR for cv2.

    :param data: 2-D grayscale or H x W x 3 RGB image array.
    :param output_dir: destination directory.
    :param fname: output file name.
    """
    data = data.squeeze()
    # Bug fix: a squeezed grayscale image is 2-D; the old `== 1` check could
    # never take this branch and 2-D input crashed in the else branch.
    if len(data.shape) == 2:
        data = data[:, :, np.newaxis].astype(np.uint8).repeat(3, axis=2)
    else:
        data = data[:,:,::-1].astype(np.uint8)
    cv2.imwrite(os.path.join(output_dir, fname), data)
def save_density_raw(density_map, output_dir, fname='results.mat'):
    """Dump the raw density map to a MATLAB .mat file under the key 'data'."""
    target = os.path.join(output_dir, fname)
    scio.savemat(target, {'data': density_map})
def get_gauss(shape=(3, 3), sigma=0.5):
    """Return a normalised 2-D Gaussian kernel (MATLAB fspecial-style)."""
    half_rows, half_cols = [(dim - 1.0) / 2.0 for dim in shape]
    grid_y, grid_x = np.ogrid[-half_rows:half_rows + 1, -half_cols:half_cols + 1]
    kernel = np.exp(-(grid_x * grid_x + grid_y * grid_y) / (2.0 * sigma * sigma))
    # zero out numerically insignificant tails
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    total = kernel.sum()
    if total != 0:
        kernel /= total
    return kernel
class Gauss2D(object):
    """Cache of truncated, normalised 2-D Gaussian kernels.

    Kernels are memoised by a '<size>_<sigma*10>' key so repeated requests
    for the same (shape, sigma) return the same array object.
    """

    def __init__(self):
        super(Gauss2D, self).__init__()
        self.kernel_set = {}

    def get(self, shape=(3, 3), sigma=0.5):
        """Return (and memoise) the kernel for ``shape``/``sigma``."""
        key = '%d_%d' % (int(shape[0]), int(sigma * 10))
        if key in self.kernel_set.keys():
            return self.kernel_set[key]
        m, n = [(ss - 1.0) / 2.0 for ss in shape]
        y, x = np.ogrid[-m:m + 1, -n:n + 1]
        h = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
        h[h < np.finfo(h.dtype).eps * h.max()] = 0
        # truncate everything below the edge value of the centre row
        t = h[0][int(m)]
        h[h < t] = 0
        sumh = h.sum()
        if sumh != 0:
            h /= sumh
        self.kernel_set[key] = h
        return h
def find_kneighbors(locations, K=6, threhold=0):
    # Indices of the K nearest annotated points for every point (ball tree).
    # NOTE(review): 'threhold' (sic) is accepted but never used.
    nbt = NearestNeighbors(n_neighbors=K, algorithm="ball_tree").fit(locations)
    distances, indices = nbt.kneighbors(locations)
    return indices
def load_annPoints(fname, annReadFunc):
    """Load a .mat annotation file and extract its points via ``annReadFunc``."""
    raw = scio.loadmat(fname)
    return annReadFunc(raw)
def check_xy(x, y, H, W):
    """Validate a point against an H x W image.

    Points more than 10 pixels outside the image are rejected; anything
    else is clamped into the valid pixel range and converted to int.

    Returns:
        (True, int(x), int(y)) for usable points, (False, None, None) otherwise.
    """
    if x > W + 10 or x < -10 or y > H + 10 or y < -10:
        return False, None, None
    if x >= W:
        x = W - 1
    if x <= 0:
        x = 0
    if y >= H:
        y = H - 1
    if y <= 0:
        y = 0
    return True, int(x), int(y)
def add_filter(den, filter, x, y, f_sz, c=1.0):
    """Add a (possibly border-clipped) kernel centred at (x, y) into ``den``.

    The clipped kernel patch is renormalised so its contribution still
    integrates to ``c * filter.sum()``.  Returns ``den`` (modified in place).
    """
    H, W = den.shape
    half = f_sz // 2
    # target window in the density map (end-exclusive)
    x1 = x - half
    x2 = x + half + 1
    y1 = y - half
    y2 = y + half + 1
    fsum = filter.sum()
    dfx1 = dfx2 = dfy1 = dfy2 = 0
    # clip against the borders, remembering how much was cut on each side
    if x1 < 0:
        dfx1 = abs(x1)
        x1 = 0
    if x2 >= W:
        dfx2 = x2 - W + 1
        x2 = W
    if y1 < 0:
        dfy1 = abs(y1)
        y1 = 0
    if y2 >= H:
        dfy2 = y2 - H + 1
        y2 = H
    # matching window inside the kernel
    x1h, x2h = dfx1, f_sz - dfx2 + 1
    y1h, y2h = dfy1, f_sz - dfy2 + 1
    patch = filter[y1h:y2h, x1h:x2h]
    den[y1:y2, x1:x2] = den[y1:y2, x1:x2] + c * fsum / patch.sum() * patch
    return den
def get_density_map_fix(H, W, annPoints, get_gauss, sigma, f_sz):
    """Build an H x W density map with one fixed-size Gaussian per annotation.

    Points rejected by check_xy are skipped.  Returns (density, kept_count).
    """
    den = np.zeros((H, W))
    gt_count = 0
    # force an odd kernel size; the size does not vary per point
    kernel_size = int(f_sz) // 2 * 2 + 1
    for x, y in annPoints:
        ok, px, py = check_xy(x, y, H, W)
        if not ok:
            # annotation lies too far outside the image
            continue
        gt_count += 1
        kernel = get_gauss((kernel_size, kernel_size), sigma)
        den = add_filter(den, kernel, px, py, kernel_size)
    return den, gt_count
def get_annoted_kneighbors(label_file, label_path, annReadFunc, K):
    """K-nearest-neighbour indices for a label file's annotation points.

    Returns None when there are too few points (<= K) to query.
    """
    points = load_annPoints(os.path.join(label_path, label_file), annReadFunc)
    if len(points) <= K:
        return None
    return find_kneighbors(points, K)
def get_density_map_adaptive(H, W, annPoints, kneighbors, K, get_gauss):
    """Density map whose kernel width adapts to the local annotation spacing.

    :param H: output height.
    :param W: output width.
    :param annPoints: (x, y) annotation points.
    :param kneighbors: per-point K-NN index array (unused when len(annPoints) <= K).
    :param K: neighbour count used to estimate local spacing.
    :param get_gauss: kernel factory, e.g. ``Gauss2D().get``.
    :return: (density map, number of points kept).
    """
    den = np.zeros((H,W))
    # fallback spacing when there are too few points for K-NN
    limit = min(min(H,W) / 8.0, 100.0)
    use_limit = False  # NOTE(review): never used
    gt_count = 0
    for i, p in enumerate(annPoints):
        x, y = p
        g, x, y = check_xy(x, y, H, W)
        if g is False:
            # print("point {} out of img {}x{} too much\n".format(p, H, W))
            continue
        else:
            gt_count += 1
        if len(annPoints) > K:
            # mean distance to the K nearest neighbours (index 0 is the point itself)
            dis = ((annPoints[kneighbors[i][1:]][:,0] - annPoints[i][0])**2
                   + (annPoints[kneighbors[i][1:]][:,1] - annPoints[i][1])**2)**0.5
            dis = dis.mean()
        else:
            dis = limit
        # kernel width proportional to the local spacing; size forced odd
        sigma = 0.3 * dis
        f_sz = int(6.0 * sigma) // 2 * 2 + 1
        filter = get_gauss((f_sz, f_sz), sigma)
        den = add_filter(den, filter, x, y, f_sz)
    return den, gt_count
def get_density_map_3d(H, W, annPoints, K, S, get_gauss):
    """Multi-scale (3-D) density map: one channel per kernel scale in ``S``.

    Each kept annotation contributes to all D = len(S) channels, weighted
    by a Gaussian over the scale index centred on the first scale larger
    than the local annotation spacing.

    :param H: output height.
    :param W: output width.
    :param annPoints: (x, y) annotation points.
    :param K: neighbour count for spacing estimation.
    :param S: increasing list of kernel scales, e.g. [9, 25, 49, 81].
    :param get_gauss: kernel factory, e.g. ``Gauss2D().get``.
    :return: (D x H x W density, number of points kept).
    """
    D = len(S)
    ov = 0.5  # variance of the scale-assignment weighting
    # Bug fix: S used to be overwritten with the hard-coded list
    # [9, 25, 49, 81], silently ignoring the caller's argument (and
    # mismatching D for any other length).
    S = np.asarray(S)
    den = np.zeros((D, H, W))
    if len(annPoints) > K:
        kneighbors = find_kneighbors(annPoints, K)
    gt_count = 0
    for i, p in enumerate(annPoints):
        x, y = p
        g, x, y = check_xy(x, y, H, W)
        if g is False:
            # annotation lies too far outside the image
            continue
        gt_count += 1
        if len(annPoints) > K:
            # mean distance to the K nearest neighbours (index 0 is the point itself)
            dis = ((annPoints[kneighbors[i][1:]][:, 0] - annPoints[i][0])**2
                   + (annPoints[kneighbors[i][1:]][:, 1] - annPoints[i][1])**2)**0.5
            dis = dis.mean()
        else:
            dis = min(min(H, W) / 8.0, 100.0)
        # first scale larger than the local spacing (last scale as fallback)
        DN = np.where(S > dis)[0]
        dn = DN[0] if len(DN) > 0 else D - 1
        # soft assignment of this point across neighbouring scales
        vn = np.exp(-((np.arange(D) - dn)**2) / (2 * ov))
        vn = vn / sum(vn)
        # renamed loop variable (was 'i', shadowing the point index above)
        for d in range(D):
            hh = vn[d]
            f_sz = S[d]
            sigma = 0.3 * f_sz
            f_sz = int(5.0 * sigma) // 2 * 2 + 1
            filter = get_gauss((f_sz, f_sz), sigma)
            den[d, ...] = add_filter(den[d, ...], filter, x, y, f_sz, hh)
    return den, gt_count
def read_image_label_fix(image_file, label_file, image_path, label_path, \
        get_gauss, sigma, f_sz, channels, downsize, annReadFunc, test=False):
    """Load an image and (unless ``test``) its fixed-kernel density map.

    Images with a side shorter than 320 px are upscaled, then both sides are
    rounded down to a multiple of ``downsize``; a resized density map is
    rescaled so its integral (the count) is preserved.
    NOTE: Python 2 module -- print statement and integer '/' division below.
    """
    img = Image.open(os.path.join(image_path, image_file)).convert('RGB')
    wd, ht = img.size
    den = None
    resize = False
    annPoints = load_annPoints(os.path.join(label_path, label_file), annReadFunc)
    if not test:
        den, gt_count = get_density_map_fix(ht, wd, annPoints, get_gauss, sigma, f_sz)
    # upscale small images so the shorter side reaches 320
    if not test and (wd < 320 or ht < 320):
        nwd = int(wd * 1.0/ min(wd, ht) * 320)
        nht = int(ht * 1.0/ min(wd, ht) * 320)
        resize = True
        img = img.resize((nwd, nht), resample=Image.BICUBIC)
        print "{} X {} -> {} X {}".format(ht, wd, nht, nwd)
        wd = nwd
        ht = nht
    # snap dimensions down to a multiple of downsize (Py2 integer division)
    nht = (ht / downsize) * downsize
    nwd = (wd / downsize) * downsize
    if nht != ht or nwd != wd:
        img = img.resize((nwd, nht), resample=Image.BICUBIC)
        resize = True
    if not test:
        if resize:
            # rescale the density map but keep its total count unchanged
            count = den.sum()
            den = cv2.resize(den, (nwd, nht))
            if den.sum() != 0:
                den = den * count / den.sum()
    # NOTE(review): returns len(annPoints), not the gt_count from the
    # density builder -- confirm intended.
    return img, den, len(annPoints)
def read_image_label_apdaptive(image_file, label_file, image_path, label_path, \
        get_gauss, kneighbors, channels, downsize, K, annReadFunc, test=False):
    """Load an image and (unless ``test``) an adaptive-kernel density map.

    Same resizing rules as read_image_label_fix: short sides are upscaled to
    >= 320 px, sizes are rounded down to a multiple of ``downsize``, and a
    resized density map is rescaled count-preservingly.
    NOTE: Python 2 semantics assumed for the '/' divisions below.
    """
    img = Image.open(os.path.join(image_path, image_file)).convert('RGB')
    wd, ht = img.size
    den = None
    resize = False
    annPoints = load_annPoints(os.path.join(label_path, label_file), annReadFunc)
    if not test:
        den, gt_count = get_density_map_adaptive(ht, wd, annPoints, kneighbors, K, get_gauss)
    # upscale small images so the shorter side reaches 320
    if not test and (wd < 320 or ht < 320):
        nwd = int(wd * 1.0/ min(wd, ht) * 320)
        nht = int(ht * 1.0/ min(wd, ht) * 320)
        resize = True
        img = img.resize((nwd, nht), resample=Image.BICUBIC)
        # print "{} X {} -> {} X {}".format(ht, wd, nht, nwd)
        wd = nwd
        ht = nht
    # snap dimensions down to a multiple of downsize (Py2 integer division)
    nht = (ht / downsize) * downsize
    nwd = (wd / downsize) * downsize
    if nht != ht or nwd != wd:
        img = img.resize((nwd, nht), resample=Image.BICUBIC)
        resize = True
    if not test:
        if resize:
            # keep the integral (head count) unchanged after resizing
            count = den.sum()
            den = cv2.resize(den, (nwd, nht))
            if den.sum() != 0:
                den = den * count / den.sum()
    # NOTE(review): returns len(annPoints), not gt_count -- confirm intended.
    return img, den, len(annPoints)
def read_image_label_3d(image_file, label_file, image_path, label_path, get_gauss, K, S, channels, downsize, annReadFunc):
    """Load an image and a multi-scale ("3D") density map with len(S) channels.

    Args:
        K, S: multi-scale density parameters passed to get_density_map_3d;
            one density channel is produced per entry of S.
        channels: number of image channels; the grayscale plane is repeated
            when channels != 1.
        downsize: network downsampling factor; output dims are multiples of it.

    Returns:
        (img of shape (1, channels, H, W),
         density maps of shape (1, len(S), H, W),
         ground-truth count)
    """
    img = imread(os.path.join(image_path, image_file), 1)
    img = img.astype(np.float32, copy=False)
    ht = img.shape[0]
    wd = img.shape[1]

    annPoints = load_annPoints(os.path.join(label_path, label_file), annReadFunc)
    den, gt_count = get_density_map_3d(ht, wd, annPoints, K, S, get_gauss)
    density_channels = len(S)  # one density channel per scale (typo fixed locally)

    # Crop dims down to multiples of `downsize`. `//` keeps the py2 integer
    # semantics on py3 (plain `/` would yield floats and break cv2.resize).
    ht_1 = (ht // downsize) * downsize
    wd_1 = (wd // downsize) * downsize
    img = cv2.resize(img, (wd_1, ht_1))
    img = img.reshape((1, 1, img.shape[0], img.shape[1]))
    if channels != 1:
        img = np.repeat(img, channels, axis=1)

    # Resize each density channel and rescale by the area ratio so the
    # integrated count is preserved.
    den_resize = []
    for i in range(density_channels):
        den_ = cv2.resize(den[i], (wd_1, ht_1))
        den_ = den_ * ((wd * ht * 1.0) / (wd_1 * ht_1))
        den_resize.append(den_[np.newaxis, ...])
    den = np.vstack(den_resize)
    den = den.reshape((1, density_channels, den.shape[1], den.shape[2]))

    return img, den, gt_count
def read_image(image_file, image_path, channels, downsize):
    """Load an image as float32 and crop it to dimensions divisible by `downsize`.

    Returns an array of shape (1, channels, H, W); the single grayscale plane
    is repeated along axis 1 when channels != 1.
    """
    img = imread(os.path.join(image_path, image_file), 1)
    img = img.astype(np.float32, copy=False)
    ht = img.shape[0]
    wd = img.shape[1]
    # `//` keeps integer semantics under both Python 2 and 3 (plain `/`
    # would yield a float on py3 and break cv2.resize).
    ht_1 = (ht // downsize) * downsize
    wd_1 = (wd // downsize) * downsize
    img = cv2.resize(img, (wd_1, ht_1))
    img = img.reshape((1, 1, img.shape[0], img.shape[1]))
    if channels != 1:
        img = np.repeat(img, channels, axis=1)
    return img
|
future-standard/Counting-ICCV-DSSINet
|
src/datasets.py
|
import socket

# Hostname of the current machine; available for switching the dataset
# roots below between environments.
hostname = socket.gethostname()

# Dataset root directories — fill in for the local environment.
SHANG_PATH = ''    # ShanghaiTech (used by 'shanghaiA'/'shanghaiB' below)
UCFEC_PATH = ''    # used by 'UCF_ECCV_Crop' below
WORLD_PATH = ''    # unused in this chunk — presumably WorldExpo'10
TRANCOS_PATH = ''  # unused in this chunk — presumably TRANCOS
UCF_PATH = ''      # unused in this chunk — presumably UCF_CC_50
UCSD_PATH = ''     # unused in this chunk — presumably UCSD
# Per-dataset configuration consumed by CreateDataLoader below.
#   density_method / density_method_config: how ground-truth density maps are
#       built; the config entries override RawLoader.basic_config[density_method].
#   train_val_split: pair of callables selecting the train / validation
#       subsets from the list of training samples.
#   annReadFunc: extracts the point-annotation array from a loaded label dict
#       (the nested indexing unpacks the ShanghaiTech .mat structure).
#   mean_std: dataset image mean / std, where provided.
#   annReadFuncTest: test-set annotation reader; None means "same as train".
datasets = {
    'shanghaiA': {
        "density_method": "adaptive",
        "density_method_config": {'downsize':32},
        "train_image_path": SHANG_PATH + '/part_A_final/train_data/images',
        "train_label_path": SHANG_PATH + '/part_A_final/train_data/ground_truth',
        "test_image_path": SHANG_PATH + '/part_A_final/test_data/images',
        "test_label_path": SHANG_PATH + '/part_A_final/test_data/ground_truth',
        "train_val_split": (lambda x:x, lambda x:x[:29]),
        "annReadFunc": lambda x: x['image_info'][0][0][0][0][0],
        "mean_std": [96.3414, 66.8793],
        "annReadFuncTest": None
    },
    'shanghaiB': {
        "density_method": "adaptive",
        "density_method_config": {'downsize':32},
        "train_image_path": SHANG_PATH + '/part_B_final/train_data/images/',
        "train_label_path": SHANG_PATH + '/part_B_final/train_data/ground_truth',
        "test_image_path": SHANG_PATH + '/part_B_final/test_data/images/',
        "test_label_path": SHANG_PATH + '/part_B_final/test_data/ground_truth',
        "train_val_split": (lambda x:x, lambda x:x[:29]),
        "annReadFunc": lambda x: x['image_info'][0][0][0][0][0],
        "mean_std": [96.3414, 66.8793],
        "annReadFuncTest": None
    },
    ### pre-crop high resolution images to normal size patches
    'UCF_ECCV_Crop': {
        "density_method": "adaptive",
        "density_method_config": {'downsize':32},
        "train_image_path": UCFEC_PATH + '/Train/crop_images',
        "train_label_path": UCFEC_PATH + '/Train/crop_ground_truth',
        "test_image_path": UCFEC_PATH + '/Test/images/',
        "test_label_path": UCFEC_PATH + '/Test/ground_truth',
        "train_val_split": (lambda x:x, lambda x:x[:1]),
        "annReadFunc": lambda x: x['annPoints'],
        "annReadFuncTest": None
    },
}
def CreateDataLoader(opt, phase=None):
    """Build the data loaders for training and/or testing.

    Args:
        opt: parsed options; must provide `dataset`, `is_preload`, the crop
            settings (crop_type/crop_scale/crop_size, patches_per_sample) for
            training and test_crop_type/test_fixed_size for testing.
        phase: None -> build (train, val, test) loaders; 'train' or 'test'
            -> build only that loader.

    Returns:
        A (train, val, test) tuple when phase is None, otherwise one loader.
    """
    from RawLoader import ImageDataLoader, basic_config
    from sampler import basic_config as sampler_config
    from sampler import mode_func as sampler_func
    import utils
    import numpy as np
    # Resolve dataset-specific paths and density-map settings.
    train_image_path = datasets[opt.dataset]["train_image_path"]
    train_label_path = datasets[opt.dataset]["train_label_path"]
    test_image_path = datasets[opt.dataset]["test_image_path"]
    test_label_path = datasets[opt.dataset]["test_label_path"]
    density_method = datasets[opt.dataset]["density_method"]
    # Start from the method's defaults and overlay per-dataset overrides.
    density_method_config = basic_config[density_method]
    for k,v in datasets[opt.dataset]["density_method_config"].items():
        density_method_config[k] = v
    annReadFunc = datasets[opt.dataset]["annReadFunc"]
    # Fall back to the train-set annotation reader when no test-specific one is given.
    annReadFuncTest = datasets[opt.dataset]["annReadFuncTest"] or annReadFunc
    split = (lambda x:x, lambda x:x)
    train_val_split = datasets[opt.dataset]["train_val_split"] or split
    train_split = train_val_split[0]
    val_split = train_val_split[1]
    print("density map config: " + datasets[opt.dataset]["density_method"])
    for k,v in density_method_config.items():
        print("{}:{}".format(k, v))
    if phase is None or phase == 'train':
        # Configure the training patch sampler from the crop options.
        crop_type = opt.crop_type
        crop_scale = opt.crop_scale
        crop_size = opt.crop_size
        train_sample_func = sampler_func[crop_type]
        train_sample_config = sampler_config[crop_type]
        if "crop_scale" in train_sample_config.keys():
            train_sample_config['crop_scale'] = crop_scale
        if "crop_size" in train_sample_config.keys():
            train_sample_config['crop_size'] = crop_size
        print("crop config: " + crop_type)
        for k,v in train_sample_config.items():
            print("{}:{}".format(k, v))
    if phase is None or phase == 'test':
        # Only the 'Adap' test-time cropping strategy is supported here.
        test_crop_type = opt.test_crop_type
        test_sample_func = sampler_func[test_crop_type]
        test_sample_config = sampler_config[test_crop_type]
        if test_crop_type == 'Adap':
            test_sample_config['fixed_size'] = opt.test_fixed_size
            # if opt.test_fixed_size == -1:
            #     assert opt.test_batch_size == 1
        else:
            assert False
        print("test crop config: " + opt.test_crop_type)
        for k,v in test_sample_config.items():
            print("{}:{}".format(k, v))
    if phase is None:
        # Build train + validation + test loaders, each wrapped in its sampler.
        data_loader_train = ImageDataLoader(train_image_path, train_label_path, density_method, is_preload=opt.is_preload, \
                                annReadFunc=annReadFunc, split=train_split,
                                **density_method_config)
        data_loader_val = ImageDataLoader(train_image_path, train_label_path, density_method, is_preload=opt.is_preload, \
                                annReadFunc=annReadFunc, split=val_split,
                                **density_method_config)
        data_loader_test = ImageDataLoader(test_image_path, test_label_path, density_method, is_preload=opt.is_preload, \
                                annReadFunc=annReadFuncTest, test=True, \
                                **density_method_config)
        data_loader_train = train_sample_func(data_loader_train, shuffle=True, \
                                patches_per_sample=opt.patches_per_sample, **train_sample_config)
        data_loader_val = train_sample_func(data_loader_val, shuffle=True, \
                                patches_per_sample=opt.patches_per_sample, **train_sample_config)
        data_loader_test = test_sample_func(data_loader_test, shuffle=False, **test_sample_config)
        return data_loader_train, data_loader_val, data_loader_test
    elif phase == 'train':
        data_loader_train = ImageDataLoader(train_image_path, train_label_path, density_method, is_preload=opt.is_preload, \
                                annReadFunc=annReadFunc, split=train_split,
                                **density_method_config)
        data_loader_train = train_sample_func(data_loader_train, shuffle=True, \
                                patches_per_sample=opt.patches_per_sample, **train_sample_config)
        return data_loader_train
    elif phase == 'test':
        # When outputs are to be saved (save_output), keep ground truth available.
        pure_test = True if not hasattr(opt, 'save_output') else not opt.save_output
        data_loader_test = ImageDataLoader(test_image_path, test_label_path, density_method, is_preload=opt.is_preload, \
                                annReadFunc=annReadFuncTest, test=pure_test, \
                                **density_method_config)
        data_loader_test = test_sample_func(data_loader_test, shuffle=False, **test_sample_config)
        return data_loader_test
|
future-standard/Counting-ICCV-DSSINet
|
src/train_options.py
|
<gh_stars>10-100
import argparse
import os
import torch
from datetime import datetime
import shutil
import time
import random
import glob
import logging
import sys
try:
from pycrayon import CrayonClient
except ImportError:
CrayonClient = None
class TrainOptions():
    """Command-line option parsing and experiment bootstrap for training.

    parse() builds the options namespace, creates the experiment directory,
    configures file logging and (optionally) a Crayon/TensorBoard experiment,
    and selects the GPUs to use.
    """

    def __init__(self):
        # Becomes True once initialize() has registered the arguments.
        self.initialized = False

    def initialize(self, parser):
        """Register all training options on `parser` and return it."""
        parser.add_argument('--gpus', type=str, help='gpu_id')
        parser.add_argument('--dataset', type=str, default='shanghaiA', help='dataset')
        parser.add_argument('--epochs', type=int, default=900)
        parser.add_argument('--lr', type=float, default=0.00001)
        parser.add_argument('--visual', dest='use_tensorboard', action='store_true')
        parser.add_argument('--no-visual', dest='use_tensorboard', action='store_false')
        parser.set_defaults(use_tensorboard=True)
        parser.add_argument('--save', dest='save_model_para', action='store_true')
        parser.add_argument('--no-save', dest='save_model_para', action='store_false')
        parser.set_defaults(save_model_para=True)
        parser.add_argument('--preload', dest='is_preload', action='store_true')
        parser.add_argument('--no-preload', dest='is_preload', action='store_false')
        parser.set_defaults(is_preload=True)
        parser.add_argument('--disp_interval', type=int, default=50)
        parser.add_argument('--save_interval', type=int, default=500)
        parser.add_argument('--batch_size', type=int, default=1)
        parser.add_argument('--pretrain', type=str)
        parser.add_argument('--crop_type', type=str, default="Fixed")
        parser.add_argument('--crop_scale', type=int, default=4)
        parser.add_argument('--crop_size', type=str, default='224x224')
        parser.add_argument('--patches_per_sample', type=int, default=5)
        parser.add_argument('--loss', type=str, default="MSE")
        parser.add_argument('--loss_scale', type=float, default=1.0)
        parser.add_argument('--model_name', type=str)
        self.initialized = True
        return parser

    def gather_options(self):
        """Create the parser on first use and parse the command line."""
        # Bug fix: the original assigned `parser` only inside the
        # `not initialized` branch, so a second call raised NameError.
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            self.parser = self.initialize(parser)
        return self.parser.parse_args()

    def print_options(self, opt):
        """Log all options (flagging non-default values) and write them to opt.txt."""
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        opt.logger.info(message)
        file_name = os.path.join(opt.expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse options, set up the experiment dir, logging, visualisation
        and GPU selection, and return the options namespace."""
        opt = self.gather_options()
        model = opt.model_name
        dataset_name = opt.dataset  # used in the saved-model directory name
        exp = 'v7-{}-{}-{}/'.format(dataset_name, model, datetime.now().strftime('exp-%m-%d_%H-%M'))
        expr_dir = './saved_models/{}/'.format(exp)  # model files are saved here
        # Bug fix: map() is lazy on Python 3 and would be exhausted after a
        # single use; materialise the WxH crop size as a list of ints.
        opt.crop_size = list(map(int, opt.crop_size.split('x')))
        if opt.save_model_para and not os.path.exists(expr_dir):
            # makedirs (instead of mkdir) also creates ./saved_models/ when missing.
            os.makedirs(expr_dir + './sup/')
        else:
            # NOTE(review): this branch is also taken when expr_dir already
            # exists, silently redirecting output to a scratch dir — confirm.
            expr_dir = '../../temp1/'
            if not os.path.exists(expr_dir + './sup/'):
                os.makedirs(expr_dir + './sup/')
        opt.expr_dir = expr_dir

        # File logging into the experiment directory.
        logger = logging.getLogger()
        fh = logging.FileHandler("{0}/{1}.log".format(expr_dir, 'log'), mode='w')
        fh.setFormatter(logging.Formatter(fmt="%(asctime)s %(message)s", datefmt="%d-%H:%M"))
        logger.addHandler(fh)
        opt.logger = logger
        self.opt = opt

        # Tensorboard (Crayon) configuration; only active when pycrayon is installed.
        use_tensorboard = opt.use_tensorboard
        remove_all_log = False  # remove all historical experiments in TensorBoard
        use_tensorboard = use_tensorboard and CrayonClient is not None
        self.vis_exp = None
        if use_tensorboard:
            cc = CrayonClient(hostname='8.8.8.8', port=7879)
            if remove_all_log:
                cc.remove_all_experiments()
            # Random suffix keeps experiment names unique on the Crayon server.
            random.seed(time.time())
            vis_exp_name = exp + str(random.random())
            opt.vis_exp_name = vis_exp_name
            self.vis_exp = cc.create_experiment(vis_exp_name)

        import socket
        hostname = socket.gethostname()

        # Parse the comma-separated GPU list and select the first device.
        str_ids = opt.gpus.split(',')
        opt.gpus = []
        for str_id in str_ids:
            gpu_id = int(str_id)
            if gpu_id >= 0:
                opt.gpus.append(gpu_id)
        if len(opt.gpus) > 0:
            torch.cuda.set_device(opt.gpus[0])

        self.opt = opt
        self.print_options(opt)
        return self.opt
|
tanyapole/OVANet
|
utils/utils.py
|
from models.basenet import *
import os
import torch
import neptune
import socket
def get_model_mme(net, num_class=13, temp=0.05, top=False, norm=True):
    """Build the feature-extractor backbone for the given architecture name.

    Args:
        net: backbone identifier, e.g. 'resnet34', 'resnet50', 'vgg16'.
        num_class, temp, norm: unused here; kept for interface compatibility.
        top: forwarded to the backbone; when True the feature dim is the
            1000-way head output.

    Returns:
        (model_g, dim): the backbone module and its feature dimension.

    Raises:
        ValueError: if `net` names neither a resnet nor a vgg variant.
    """
    dim = 2048  # default for the deeper resnets
    if "resnet" in net:
        model_g = ResBase(net, top=top)
        if "resnet18" in net or net == "resnet34":
            dim = 512
    elif "vgg" in net:
        model_g = VGGBase(option=net, pret=True, top=top)
        dim = 4096
    else:
        # Bug fix: the original fell through and raised NameError on an
        # unknown backbone; fail with an explicit error instead.
        raise ValueError("unknown network: %s" % net)
    if top:
        dim = 1000
    print("selected network %s" % net)
    return model_g, dim
def log_set(kwargs):
    """Derive the record-file path from the experiment settings, create its
    directory, configure file logging, and return the path."""
    source_data = kwargs["source_data"]
    #args = kwargs["args"]
    # File name encodes script, source->target pair, backbone and hyper-param.
    target_name = os.path.splitext(os.path.basename(kwargs["target_data"]))[0]
    stem = "{file}_{source}2{target}_{network}_hp_{hp}".format(
        file=kwargs["script_name"].replace(".py", ""),
        source=source_data.split("_")[1],
        target=target_name,
        network=kwargs["network"],
        hp=str(kwargs["multi"]))
    conf_name = os.path.basename(kwargs["config_file"]).replace(".yaml", "")
    logname = os.path.join("record", kwargs["exp_name"], conf_name, stem)
    log_dir = os.path.dirname(logname)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    print("record in %s " % logname)
    import logging
    logger = logging.getLogger(__name__)
    logging.basicConfig(filename=logname, format="%(message)s")
    logger.setLevel(logging.INFO)
    logger.info("{}_2_{}".format(source_data, target_name))
    return logname
def save_model(model_g, model_c1, model_c2, save_path):
    """Persist the state dicts of the feature extractor and both classifiers
    to `save_path` in a single checkpoint file."""
    torch.save(
        {
            'g_state_dict': model_g.state_dict(),
            'c1_state_dict': model_c1.state_dict(),
            'c2_state_dict': model_c2.state_dict(),
        },
        save_path,
    )
def load_model(model_g, model_c, load_path):
    """Restore generator and classifier weights from a checkpoint file.

    NOTE(review): save_model() above writes keys 'c1_state_dict' and
    'c2_state_dict', but this loader reads 'c_state_dict' — the two are not
    round-trip compatible; presumably this loads checkpoints written by a
    different script. Confirm against the training code.
    """
    checkpoint = torch.load(load_path)
    model_g.load_state_dict(checkpoint['g_state_dict'])
    model_c.load_state_dict(checkpoint['c_state_dict'])
    return model_g, model_c
|
Suveesh/Disaster-Response-Pipeline
|
models/train_classifier.py
|
<filename>models/train_classifier.py
# In[1]:
import sys
import nltk
import warnings
warnings.filterwarnings('ignore') # "error", "ignore", "always", "default", "module" or "once"
nltk.download('punkt')
nltk.download('wordnet')
import pandas as pd
import numpy as np
import re
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import confusion_matrix
from nltk.stem.porter import PorterStemmer
from sklearn.model_selection import GridSearchCV
import pickle
# In[2]:
def load_data(database_filepath):
    '''
    Load the cleaned disaster-response table from a SQLite database.

    Input: path to the SQLite database file
    Output: feature series X (messages), target frame Y (category columns),
            and the array of category column names
    '''
    engine = create_engine('sqlite:///' + database_filepath)
    frame = pd.read_sql("SELECT * FROM final", engine)
    # First four columns are id/message/original/genre; the rest are targets.
    messages = frame['message']
    targets = frame.iloc[:, 4:]
    return messages, targets, targets.columns.values
# In[3]:
def tokenize(text):
    '''
    Normalise raw message text into a list of clean lemmatised tokens.

    Input: text string
    Output: list of lowercase-insensitive lemmatised word tokens
    '''
    # Replace punctuation with spaces, then split into word tokens.
    words = word_tokenize(re.sub(r'[^a-zA-Z0-9]', ' ', text))
    lemmatizer = WordNetLemmatizer()
    # Lemmatise each token as a noun first, then feed that result through
    # verb lemmatisation — chaining the two passes is a common pattern.
    return [
        lemmatizer.lemmatize(lemmatizer.lemmatize(word, pos='n').strip(), pos='v')
        for word in words
    ]
# In[4]:
def build_model():
    '''
    Assemble the classification pipeline (count vectorizer -> tf-idf ->
    multi-output random forest) wrapped in a small grid search.

    Input: N/A
    Output: an unfitted GridSearchCV model
    '''
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ]
    search_space = {
        'tfidf__norm': ['l1', 'l2'],
        'clf__estimator__criterion': ["gini", "entropy"],
    }
    return GridSearchCV(Pipeline(steps), param_grid=search_space)
# In[5]:
def evaluate_model(model, X_test, Y_test, category_names):
    '''
    Evaluate the model on the test split and report per-category metrics.

    Inputs: fitted model, X_test, Y_test, category_names
    Outputs: prints and returns the sklearn classification report (string)
    '''
    Y_pred = model.predict(X_test)
    # Bug fix: classification_report expects (y_true, y_pred); the original
    # passed the predictions first, which swaps precision and recall.
    report = classification_report(Y_test, Y_pred, target_names=category_names)
    # (The original also parsed the report text into a DataFrame that was
    # never used; that dead code has been removed.)
    print(report)
    return report
# In[6]:
def save_model(model, model_filepath):
    '''
    Serialize the trained model to `model_filepath` as a pickle file.

    Input: model and the destination file path
    Output: the pickled model written to `model_filepath`
    '''
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
# In[7]:
def main():
    """Command-line entry point: load data, train, evaluate and pickle the model.

    Usage: python train_classifier.py <database_filepath> <model_filepath>
    """
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        # 80/20 train/test split.
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
        print('Building model...')
        model = build_model()
        print('Training model...')
        model.fit(X_train, Y_train)
        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)
        print('Saving model...\n MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)
        print('Trained model saved!')
    else:
        # Wrong argument count: show usage help.
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
# In[8]:
if __name__ == '__main__':
main()
# %%
# %%
|
Suveesh/Disaster-Response-Pipeline
|
data/process_data.py
|
<filename>data/process_data.py
# In[1]:
import sys
import pandas as pd
import numpy as np
import re
from sqlalchemy import create_engine
# In[2]:
def load_data(messages_filepath, categories_filepath):
    '''
    Read the messages and categories CSV files and inner-join them on 'id'.

    Input: messages_filepath, categories_filepath
    Output: merged dataframe of messages and categories
    '''
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    # Inner join keeps only ids present in both files.
    return messages.merge(categories, on="id", how='inner')
# In[3]:
def clean_data(df):
    '''
    Clean the merged dataframe so it is usable for multi-label training.

    Splits the single ';'-delimited `categories` column into one integer
    column per category (names taken from the 'name-value' pairs in the
    first row), binarises values greater than 1, replaces the raw
    `categories` column with the expanded ones, and drops duplicate rows.

    Input: df (must contain a 'categories' column of 'name-value;...' strings)
    Output: cleaned and formatted dataframe
    '''
    # Expand 'cat1-v;cat2-v;...' into one column per category.
    categories = df['categories'].str.split(';', expand=True)
    # Column labels come from the names in the first row's 'name-value' pairs.
    first_row = categories.iloc[0].str.split('-', expand=True)
    categories.columns = list(first_row[0])

    for column in categories:
        # Keep only the numeric value after the last '-' and cast to int.
        categories[column] = categories[column].str.split('-').str.get(-1).astype(int)
        # Binarise: any value greater than 1 is treated as 1. The vectorised
        # clip replaces the original per-element chained-assignment loop,
        # which is slow and unreliable under pandas copy-on-write.
        categories[column] = categories[column].clip(upper=1)

    # Replace the raw categories column with the expanded ones, then drop duplicates.
    df.drop(['categories'], axis=1, inplace=True)
    df = pd.concat([df, categories], axis=1, join="inner").drop_duplicates()
    return df
# In[4]:
def save_data(df, database_filename):
    '''
    Write the cleaned dataframe to table 'final' of a SQLite database,
    replacing the table if it already exists.

    Input: df, database_filename
    Output: SQL database file on disk
    '''
    connection = create_engine('sqlite:///' + database_filename)
    df.to_sql('final', connection, index=False, if_exists='replace')
# In[5]:
def main():
    """Command-line entry point: load, clean and persist the disaster data.

    Usage: python process_data.py <messages_csv> <categories_csv> <database>
    """
    if len(sys.argv) == 4:
        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
        print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
              .format(messages_filepath, categories_filepath))
        df = load_data(messages_filepath, categories_filepath)
        print('Cleaning data...')
        df = clean_data(df)
        print('Saving data...\n DATABASE: {}'.format(database_filepath))
        save_data(df, database_filepath)
        print('Cleaned data saved to database!')
    else:
        # Wrong argument count: show usage help.
        print('Please provide the filepaths of the messages and categories '\
              'datasets as the first and second argument respectively, as '\
              'well as the filepath of the database to save the cleaned data '\
              'to as the third argument. \n\nExample: python process_data.py '\
              'disaster_messages.csv disaster_categories.csv '\
              'DisasterResponse.db')
# In[6]:
if __name__ == '__main__':
main()
# %%
|
JugglingNumbers/cma-es
|
cma/__init__.py
|
from .core import CMA
|
JugglingNumbers/cma-es
|
notebook/utils/plot.py
|
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.ticker import FormatStrFormatter, LogLocator
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from scipy.stats import chi2
import tensorflow as tf
def plot_3d_surface(
    fitness_fn,
    xlim,
    ylim,
    zlim=None,
    view_init=None,
    mean=None,
    solutions=None,
    show_axes=True,
    fig=None,
    ax=None,
    figsize=(15, 8),
):
    """Plot a fitness function as a 3D surface over xlim x ylim.

    Optionally overlays the distribution mean (red '+') and the sampled
    solutions (first half in red, second half in grey).

    Returns:
        (fig, ax)
    """
    if ax is None:
        # Bug fix: honor a caller-supplied `fig` (the original ignored the
        # `fig` argument and always created a new figure), matching the
        # behaviour of plot_2d_contour.
        if fig is None:
            fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111, projection='3d')

    # Evaluate the fitness function on a 100x100 grid.
    a = np.linspace(*xlim, 100)
    b = np.linspace(*ylim, 100)
    A, B = np.meshgrid(a, b)
    grid_values = tf.convert_to_tensor([[u, v] for u, v in zip(np.ravel(A), np.ravel(B))])
    zs = fitness_fn(grid_values).numpy()
    Z = zs.reshape(A.shape)

    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')

    if mean is not None:
        ax.scatter3D(
            [mean[0]],
            [mean[1]],
            [fitness_fn(tf.convert_to_tensor([mean])).numpy()[0]],
            depthshade=False,
            marker='+',
            color='red',
            s=50,
        )

    if solutions is None:
        solutions = []
    for i, solution in enumerate(solutions):
        # First half of the population drawn in red, the rest in grey.
        ax.scatter3D(
            [solution[0]],
            [solution[1]],
            [fitness_fn(tf.convert_to_tensor([solution])).numpy()[0]],
            depthshade=False,
            marker='o',
            color='red' if (i+1) <= len(solutions) / 2 else 'grey',
            s=30,
        )

    ax.plot_surface(A, B, Z, cmap='cool', alpha=0.8)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.grid(False)

    if zlim is not None:
        ax.set_zlim(zlim)
    if view_init is not None:
        ax.view_init(*view_init)
    if not show_axes:
        plt.axis('off')

    return fig, ax
def plot_2d_contour(
    fitness_fn,
    xlim,
    ylim,
    mean=None,
    solutions=None,
    levels=25,
    show_axes=True,
    show_color_scale=True,
    fig=None,
    ax=None,
    figsize=(15, 8),
    log_scale=False,
):
    """Plot a fitness function as a filled 2D contour over xlim x ylim.

    Optionally overlays the distribution mean (black '+') and the population
    (white dots; the first half drawn larger with grey edges).

    Returns:
        (fig, ax)
    """
    if fig is None:
        fig = plt.figure(figsize=figsize)
    if ax is None:
        ax = fig.add_subplot(111)
    # Evaluate the fitness function on a 100x100 grid.
    a = np.linspace(*xlim, 100)
    b = np.linspace(*ylim, 100)
    A, B = np.meshgrid(a, b)
    grid_values = tf.convert_to_tensor([[u, v] for u, v in zip(np.ravel(A), np.ravel(B))])
    zs = fitness_fn(grid_values).numpy()
    Z = zs.reshape(A.shape)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    if mean is not None:
        ax.plot(
            mean[0],
            mean[1],
            marker='+',
            color='black',
            markersize=12,
            linestyle='None',
            label='mean',
        )
    if solutions is None:
        solutions = []
    # Label only the first point of each half so the legend has one entry per group.
    mu = int(np.floor(len(solutions) / 2))
    for i, solution in enumerate(solutions):
        if i == 0:
            label = 'population (selected)'
        elif i == mu:
            label = 'population (discarded)'
        else:
            label = None
        ax.plot(
            solution[0],
            solution[1],
            marker='o',
            color='white',
            markersize=8 if (i+1) <= len(solutions) / 2 else 5,
            linestyle='None',
            markeredgecolor='grey' if (i+1) <= len(solutions) / 2 else None,
            label=label
        )
    # Optional logarithmic spacing of contour levels.
    locator = None
    if log_scale is True:
        locator = LogLocator()
    cs = ax.contourf(A, B, Z, levels=levels, cmap='cool', locator=locator)
    if fig is not None and show_color_scale:
        fig.colorbar(cs, ax=ax)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.grid(False)
    if mean is not None or len(solutions) > 0:
        ax.legend()
    if not show_axes:
        plt.axis('off')
    return fig, ax
def draw_confidence_ellipse(
    ax,
    mean,
    eigenvectors,
    eigenvalues,
    confidence=0.95,
    facecolor='None',
    edgecolor='black',
    **kwargs,
):
    """
    Draw a covariance error ellipse, i.e. an iso-contour of the multivariate normal distribution.
    A 95% confidence ellipse (default) shows where 95% of sampled points will fall.
    Ref: https://www.visiondummy.com/2014/04/draw-error-ellipse-representing-covariance-matrix/
    """
    if not np.isscalar(confidence) or not 0 < confidence < 1:
        raise ValueError('Confidence must be a number between 0 and 1')
    # Scale eigenvalues by the chi-square quantile for the requested mass.
    scale = chi2.isf(q=1. - confidence, df=2)
    full_width = 2 * np.sqrt(scale * eigenvalues[0])
    full_height = 2 * np.sqrt(scale * eigenvalues[1])
    # Counter clockwise angle in degrees between the y-axis and the
    # second principal axis of the covariance matrix.
    # Note: the angle between the x-axis and the first principal axis is the same,
    # thus angle_deg([1, 0], eigenvectors[0]) is equivalent.
    tilt = angle_deg([0, 1], eigenvectors[1])
    ax.add_patch(Ellipse(
        xy=(mean[0], mean[1]),
        width=full_width,
        height=full_height,
        angle=tilt,
        facecolor=facecolor,
        edgecolor=edgecolor,
        **kwargs,
    ))
def plot_generations(generations, cma_trace, fitness_fn, xlim, ylim, num_columns=3):
    """Plot a grid of 2D contour snapshots, one per requested generation.

    Each subplot shows the fitness contour, the population of that
    generation, and the 95% confidence ellipse of the search distribution.

    Args:
        generations: iterable of generation indices to plot.
        cma_trace: per-generation dicts with keys 'm', 'B', 'D', 'σ',
            'population' (as recorded by the CMA run).
        fitness_fn: fitness function evaluated on the grid.
        xlim, ylim: plot bounds.
        num_columns: subplots per row.

    Returns:
        (figure, flat array of axes)
    """
    num_rows = int(np.ceil(len(generations) / num_columns))
    f, axes = plt.subplots(
        num_rows,
        num_columns,
        sharex=True,
        sharey=True,
        figsize=(16, 5 * num_rows),
    )
    axes = axes.flatten()
    for i, ax in enumerate(axes):
        # Remove unused subplots in the last row.
        if i >= len(generations):
            ax.remove()
            continue
        generation = generations[i]
        trace = cma_trace[generation]
        m = trace['m']
        B = trace['B']
        # Covariance eigenvalues: (step size * D)^2 per principal axis.
        l = trace['σ']**2 * np.diagonal(trace['D'])**2
        population = trace['population']
        plot_2d_contour(
            fitness_fn,
            xlim=xlim,
            ylim=ylim,
            mean=m,
            solutions=population,
            show_color_scale=False,
            fig=f,
            ax=ax,
        );
        draw_confidence_ellipse(
            ax,
            mean=m,
            eigenvectors=B,
            eigenvalues=l,
            confidence=0.95,
        )
        # Keep the legend and axis labels only on the first subplot.
        if i > 0:
            ax.get_legend().remove()
            ax.set_xlabel('')
            ax.set_ylabel('')
        ax.get_xaxis().set_major_formatter(FormatStrFormatter('%.2f'))
        ax.get_yaxis().set_major_formatter(FormatStrFormatter('%.2f'))
        ax.set_title(f'Generation {generation}')
    return f, axes
def plot_mean_coordinates(trace, num_columns=2, figsize=(15, 6)):
    """Plot each coordinate of the distribution mean against the generation.

    Args:
        trace: per-generation dicts with key 'm' (the mean vector).
        num_columns: subplots per row.
        figsize: base figure size; height scales with the number of rows.

    Returns:
        (figure, flat array of axes)
    """
    means = np.vstack([t['m'] for t in trace])
    generations = range(len(means))
    num_rows = int(np.ceil(means.shape[1] / num_columns))
    _fig_size = (figsize[0], figsize[1] * num_rows)
    fig, axes = plt.subplots(num_rows, num_columns, figsize=_fig_size)
    axes = axes.flatten()
    for i, ax in enumerate(axes):
        # Remove unused subplots in the last row.
        if i >= means.shape[1]:
            ax.remove()
            continue
        ax.plot(generations, means[:,i])
        ax.set_xlabel('Generation')
        ax.set_title(f'$X_{i+1}$')
        ax.grid(True)
    fig.suptitle('Evolution of the mean\n', fontsize='x-large');
    return fig, axes
def angle_rad(u, v):
    """
    Counter-clockwise angle in radian between vectors u and v.
    """
    # Normalise both vectors, then use atan2(cross, dot).
    un = np.asarray(u) / np.linalg.norm(u, 2)
    vn = np.asarray(v) / np.linalg.norm(v, 2)
    cross = un[0] * vn[1] - un[1] * vn[0]
    dot = un[0] * vn[0] + un[1] * vn[1]
    return np.arctan2(cross, dot)
def angle_deg(u, v):
    """
    Counter-clockwise angle in degrees between vectors u and v.
    """
    return np.degrees(angle_rad(u, v))
|
JugglingNumbers/cma-es
|
notebook/tensorboard_example.py
|
import logging
import os
import tensorflow as tf

# The example is meant to run from the repository root; hop up one level
# when launched from inside notebook/ so `cma` and logs/ resolve.
if os.getcwd().split(os.sep)[-1] == 'notebook':
    os.chdir('..')
from cma import CMA
def main():
    """Run CMA-ES on the 10-D Griewank function, streaming the best fitness
    of each generation to a TensorBoard summary log."""
    logging.basicConfig(level=logging.INFO, format="%(asctime)s (%(levelname)s) %(message)s")
    tf.random.set_seed(123)

    max_epochs = 500

    log_dir = 'logs/griewank_function'
    summary_writer = tf.summary.create_file_writer(log_dir)

    def logging_function(cma, logger):
        # Callback invoked by CMA; closes over summary_writer and max_epochs.
        fitness = cma.best_fitness()
        # Write best fitness to the tensorboard summary log
        with summary_writer.as_default():
            tf.summary.scalar('fitness', fitness, step=cma.generation)
        # Periodically log progress
        if cma.generation % 10 == 0:
            logger.info(f'Generation {cma.generation} - fitness {fitness}')
        if cma.generation == max_epochs or cma.termination_criterion_met:
            sol = cma.best_solution()
            logger.info(f'Final solution at gen {cma.generation}: {sol} (fitness: {fitness})')

    # Search within [-600, 600]^10, starting far from the optimum at 0.
    cma = CMA(
        initial_solution=[100.] * 10,
        initial_step_size=600.,
        fitness_function=fitness_fn,
        enforce_bounds=[[-600, 600]] * 10,
        callback_function=logging_function,
    )
    cma.search(max_epochs)
def fitness_fn(x):
    """
    Griewank Function
    https://www.sfu.ca/~ssurjano/griewank.html
    """
    dimension = tf.shape(x)[1].numpy()
    squares = []
    cosines = []
    for i in range(dimension):
        squares.append(x[:, i]**2)
        cosines.append(tf.cos(x[:, i] / tf.sqrt(tf.cast(i, dtype=tf.float64) + 1)))
    sum_term = (1. / 4000) * tf.reduce_sum(squares, axis=0)
    prod_term = tf.reduce_prod(cosines, axis=0)
    return 1. + sum_term - prod_term
if __name__ == '__main__':
main()
|
JugglingNumbers/cma-es
|
notebook/utils/__init__.py
|
<reponame>JugglingNumbers/cma-es<gh_stars>10-100
from .plot import (
plot_3d_surface,
plot_2d_contour,
plot_generations,
draw_confidence_ellipse,
plot_mean_coordinates,
)
|
JugglingNumbers/cma-es
|
setup.py
|
import setuptools

# Short package summary used on PyPI.
description = (
    "Covariance Matrix Adaptation Evolution Strategy (CMA-ES) implemented with TensorFlow"
)

# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name='cma-es',
    packages=['cma'],
    version='1.5.0',
    license='MIT',
    author="<NAME>",
    author_email="<EMAIL>",
    description=description,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/srom/cma-es',
    keywords=['optimization', 'numerical-optimization', 'tensorflow'],
    python_requires='>=3.6',
    install_requires=[
        'tensorflow',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
)
|
bernardkucina/vislice
|
tekstovni_umeski.py
|
<gh_stars>0
import model
def izpis_poraza(igra):
    """Return the loss message, revealing the hidden word."""
    return 'IZGUBIL SI , <NAME>: {}'.format(igra.geslo)
def izpis_zmage(igra):
    """Return the win message with the word and the number of wrong guesses."""
    stevilo_ugibov = len(igra.napacne_crke())
    return 'ZMAGAL SI, <NAME>: {},POTREBOVAL SI {} UGIBOV'.format(igra.geslo, stevilo_ugibov)
def izpis_igra(igra):
    """Return the current game state: revealed letters and remaining mistakes."""
    preostalo = model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak()
    return (
        f'Stanje gesla: {igra.praviln_del_gesla()} \n '
        f'Imaš še {preostalo} možnosti za napako'
    )
def zahtevaj_vnos():
    """Prompt the player for the next letter and return the raw input."""
    return input('Vpiši naslednjo črko')
def pozeni_umesnik():
    """Run the text-based hangman loop: print the state, read a letter,
    and repeat until the game is won or lost."""
    # Start a fresh game
    trenutna_igra = model.nova_igra()
    while True:
        print(izpis_igra(trenutna_igra))
        crka = zahtevaj_vnos()
        # Return value of ugibaj() is not inspected; win/loss is re-checked below.
        rezultat = trenutna_igra.ugibaj(crka)
        if trenutna_igra.zmaga():
            print(izpis_zmage(trenutna_igra))
            break
        if trenutna_igra.poraz():
            print(izpis_poraza(trenutna_igra))
            break

# NOTE(review): runs at import time — there is no `if __name__` guard.
pozeni_umesnik()
|
renovate-bot/python-deploy
|
google/cloud/deploy_v1/services/cloud_deploy/async_client.py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core.client_options import ClientOptions
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.cloud.deploy_v1.services.cloud_deploy import pagers
from google.cloud.deploy_v1.types import cloud_deploy
from .client import CloudDeployClient
from .transports.base import DEFAULT_CLIENT_INFO, CloudDeployTransport
from .transports.grpc_asyncio import CloudDeployGrpcAsyncIOTransport
class CloudDeployAsyncClient:
"""CloudDeploy service creates and manages Continuous Delivery
operations on Google Cloud Platform via Skaffold
(https://skaffold.dev).
"""
_client: CloudDeployClient
DEFAULT_ENDPOINT = CloudDeployClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = CloudDeployClient.DEFAULT_MTLS_ENDPOINT
build_path = staticmethod(CloudDeployClient.build_path)
parse_build_path = staticmethod(CloudDeployClient.parse_build_path)
cluster_path = staticmethod(CloudDeployClient.cluster_path)
parse_cluster_path = staticmethod(CloudDeployClient.parse_cluster_path)
config_path = staticmethod(CloudDeployClient.config_path)
parse_config_path = staticmethod(CloudDeployClient.parse_config_path)
delivery_pipeline_path = staticmethod(CloudDeployClient.delivery_pipeline_path)
parse_delivery_pipeline_path = staticmethod(
CloudDeployClient.parse_delivery_pipeline_path
)
membership_path = staticmethod(CloudDeployClient.membership_path)
parse_membership_path = staticmethod(CloudDeployClient.parse_membership_path)
release_path = staticmethod(CloudDeployClient.release_path)
parse_release_path = staticmethod(CloudDeployClient.parse_release_path)
rollout_path = staticmethod(CloudDeployClient.rollout_path)
parse_rollout_path = staticmethod(CloudDeployClient.parse_rollout_path)
target_path = staticmethod(CloudDeployClient.target_path)
parse_target_path = staticmethod(CloudDeployClient.parse_target_path)
worker_pool_path = staticmethod(CloudDeployClient.worker_pool_path)
parse_worker_pool_path = staticmethod(CloudDeployClient.parse_worker_pool_path)
common_billing_account_path = staticmethod(
CloudDeployClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
CloudDeployClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(CloudDeployClient.common_folder_path)
parse_common_folder_path = staticmethod(CloudDeployClient.parse_common_folder_path)
common_organization_path = staticmethod(CloudDeployClient.common_organization_path)
parse_common_organization_path = staticmethod(
CloudDeployClient.parse_common_organization_path
)
common_project_path = staticmethod(CloudDeployClient.common_project_path)
parse_common_project_path = staticmethod(
CloudDeployClient.parse_common_project_path
)
common_location_path = staticmethod(CloudDeployClient.common_location_path)
parse_common_location_path = staticmethod(
CloudDeployClient.parse_common_location_path
)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            CloudDeployAsyncClient: The constructed client.
        """
        # Reuse the sync client's classmethod implementation via __func__,
        # re-binding it to this class so the constructed instance is async.
        return CloudDeployClient.from_service_account_info.__func__(CloudDeployAsyncClient, info, *args, **kwargs)  # type: ignore
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            CloudDeployAsyncClient: The constructed client.
        """
        # Reuse the sync client's classmethod implementation via __func__,
        # re-binding it to this class so the constructed instance is async.
        return CloudDeployClient.from_service_account_file.__func__(CloudDeployAsyncClient, filename, *args, **kwargs)  # type: ignore
    # Alias kept for backward compatibility with older surfaces.
    from_service_account_json = from_service_account_file
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.
        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.
        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.
        More details can be found at https://google.aip.dev/auth/4114.
        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.
        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        # All resolution logic lives on the sync client; delegate directly.
        return CloudDeployClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
    @property
    def transport(self) -> CloudDeployTransport:
        """Returns the transport used by the client instance.
        Returns:
            CloudDeployTransport: The transport used by the client instance.
        """
        return self._client.transport
    # Resolve the transport class via the sync client's (meta)class helper;
    # partial pre-binds both arguments so this is callable with no args.
    get_transport_class = functools.partial(
        type(CloudDeployClient).get_transport_class, type(CloudDeployClient)
    )
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, CloudDeployTransport] = "grpc_asyncio",
        client_options: Optional[ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the cloud deploy client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.CloudDeployTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Construction is delegated to the sync client; this async wrapper
        # only adapts its transport to asyncio.
        self._client = CloudDeployClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )
    async def list_delivery_pipelines(
        self,
        request: Optional[Union[cloud_deploy.ListDeliveryPipelinesRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListDeliveryPipelinesAsyncPager:
        r"""Lists DeliveryPipelines in a given project and
        location.
        .. code-block:: python
            from google.cloud import deploy_v1
            async def sample_list_delivery_pipelines():
                # Create a client
                client = deploy_v1.CloudDeployAsyncClient()
                # Initialize request argument(s)
                request = deploy_v1.ListDeliveryPipelinesRequest(
                    parent="parent_value",
                )
                # Make the request
                page_result = client.list_delivery_pipelines(request=request)
                # Handle the response
                async for response in page_result:
                    print(response)
        Args:
            request (Union[google.cloud.deploy_v1.types.ListDeliveryPipelinesRequest, dict]):
                The request object. The request object for
                `ListDeliveryPipelines`.
            parent (:class:`str`):
                Required. The parent, which owns this collection of
                pipelines. Format must be
                projects/{project_id}/locations/{location_name}.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.deploy_v1.services.cloud_deploy.pagers.ListDeliveryPipelinesAsyncPager:
                The response object from ListDeliveryPipelines.
                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = cloud_deploy.ListDeliveryPipelinesRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_delivery_pipelines,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListDeliveryPipelinesAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )
        # Done; return the response.
        return response
    async def get_delivery_pipeline(
        self,
        request: Optional[Union[cloud_deploy.GetDeliveryPipelineRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> cloud_deploy.DeliveryPipeline:
        r"""Gets details of a single DeliveryPipeline.
        .. code-block:: python
            from google.cloud import deploy_v1
            async def sample_get_delivery_pipeline():
                # Create a client
                client = deploy_v1.CloudDeployAsyncClient()
                # Initialize request argument(s)
                request = deploy_v1.GetDeliveryPipelineRequest(
                    name="name_value",
                )
                # Make the request
                response = await client.get_delivery_pipeline(request=request)
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.deploy_v1.types.GetDeliveryPipelineRequest, dict]):
                The request object. The request object for
                `GetDeliveryPipeline`
            name (:class:`str`):
                Required. Name of the ``DeliveryPipeline``. Format must
                be
                projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}.
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.deploy_v1.types.DeliveryPipeline:
                A DeliveryPipeline resource in the Google Cloud Deploy
                API.
                A DeliveryPipeline defines a pipeline through which a
                Skaffold configuration can progress.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = cloud_deploy.GetDeliveryPipelineRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_delivery_pipeline,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Done; return the response.
        return response
    async def create_delivery_pipeline(
        self,
        request: Optional[Union[cloud_deploy.CreateDeliveryPipelineRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        delivery_pipeline: Optional[cloud_deploy.DeliveryPipeline] = None,
        delivery_pipeline_id: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation_async.AsyncOperation:
        r"""Creates a new DeliveryPipeline in a given project and
        location.
        .. code-block:: python
            from google.cloud import deploy_v1
            async def sample_create_delivery_pipeline():
                # Create a client
                client = deploy_v1.CloudDeployAsyncClient()
                # Initialize request argument(s)
                request = deploy_v1.CreateDeliveryPipelineRequest(
                    parent="parent_value",
                    delivery_pipeline_id="delivery_pipeline_id_value",
                )
                # Make the request
                operation = client.create_delivery_pipeline(request=request)
                print("Waiting for operation to complete...")
                response = await operation.result()
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.deploy_v1.types.CreateDeliveryPipelineRequest, dict]):
                The request object. The request object for
                `CreateDeliveryPipeline`.
            parent (:class:`str`):
                Required. The parent collection in which the
                ``DeliveryPipeline`` should be created. Format should be
                projects/{project_id}/locations/{location_name}.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            delivery_pipeline (:class:`google.cloud.deploy_v1.types.DeliveryPipeline`):
                Required. The ``DeliveryPipeline`` to create.
                This corresponds to the ``delivery_pipeline`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            delivery_pipeline_id (:class:`str`):
                Required. ID of the ``DeliveryPipeline``.
                This corresponds to the ``delivery_pipeline_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.
                The result type for the operation will be
                :class:`google.cloud.deploy_v1.types.DeliveryPipeline` A
                DeliveryPipeline resource in the Google Cloud Deploy
                API.
                A DeliveryPipeline defines a pipeline through which a
                Skaffold configuration can progress.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, delivery_pipeline, delivery_pipeline_id])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = cloud_deploy.CreateDeliveryPipelineRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if delivery_pipeline is not None:
            request.delivery_pipeline = delivery_pipeline
        if delivery_pipeline_id is not None:
            request.delivery_pipeline_id = delivery_pipeline_id
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE: creates are not retried by default (no default_retry), since
        # they are not idempotent.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_delivery_pipeline,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            cloud_deploy.DeliveryPipeline,
            metadata_type=cloud_deploy.OperationMetadata,
        )
        # Done; return the response.
        return response
    async def update_delivery_pipeline(
        self,
        request: Optional[Union[cloud_deploy.UpdateDeliveryPipelineRequest, dict]] = None,
        *,
        delivery_pipeline: Optional[cloud_deploy.DeliveryPipeline] = None,
        update_mask: Optional[field_mask_pb2.FieldMask] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation_async.AsyncOperation:
        r"""Updates the parameters of a single DeliveryPipeline.
        .. code-block:: python
            from google.cloud import deploy_v1
            async def sample_update_delivery_pipeline():
                # Create a client
                client = deploy_v1.CloudDeployAsyncClient()
                # Initialize request argument(s)
                request = deploy_v1.UpdateDeliveryPipelineRequest(
                )
                # Make the request
                operation = client.update_delivery_pipeline(request=request)
                print("Waiting for operation to complete...")
                response = await operation.result()
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.deploy_v1.types.UpdateDeliveryPipelineRequest, dict]):
                The request object. The request object for
                `UpdateDeliveryPipeline`.
            delivery_pipeline (:class:`google.cloud.deploy_v1.types.DeliveryPipeline`):
                Required. The ``DeliveryPipeline`` to update.
                This corresponds to the ``delivery_pipeline`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
                Required. Field mask is used to specify the fields to be
                overwritten in the ``DeliveryPipeline`` resource by the
                update. The fields specified in the update_mask are
                relative to the resource, not the full request. A field
                will be overwritten if it is in the mask. If the user
                does not provide a mask then all fields will be
                overwritten.
                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.
                The result type for the operation will be
                :class:`google.cloud.deploy_v1.types.DeliveryPipeline` A
                DeliveryPipeline resource in the Google Cloud Deploy
                API.
                A DeliveryPipeline defines a pipeline through which a
                Skaffold configuration can progress.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([delivery_pipeline, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = cloud_deploy.UpdateDeliveryPipelineRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if delivery_pipeline is not None:
            request.delivery_pipeline = delivery_pipeline
        if update_mask is not None:
            request.update_mask = update_mask
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE: updates are not retried by default (no default_retry), since
        # they are not idempotent.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.update_delivery_pipeline,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("delivery_pipeline.name", request.delivery_pipeline.name),)
            ),
        )
        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            cloud_deploy.DeliveryPipeline,
            metadata_type=cloud_deploy.OperationMetadata,
        )
        # Done; return the response.
        return response
    async def delete_delivery_pipeline(
        self,
        request: Optional[Union[cloud_deploy.DeleteDeliveryPipelineRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation_async.AsyncOperation:
        r"""Deletes a single DeliveryPipeline.
        .. code-block:: python
            from google.cloud import deploy_v1
            async def sample_delete_delivery_pipeline():
                # Create a client
                client = deploy_v1.CloudDeployAsyncClient()
                # Initialize request argument(s)
                request = deploy_v1.DeleteDeliveryPipelineRequest(
                    name="name_value",
                )
                # Make the request
                operation = client.delete_delivery_pipeline(request=request)
                print("Waiting for operation to complete...")
                response = await operation.result()
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.deploy_v1.types.DeleteDeliveryPipelineRequest, dict]):
                The request object. The request object for
                `DeleteDeliveryPipeline`.
            name (:class:`str`):
                Required. The name of the ``DeliveryPipeline`` to
                delete. Format should be
                projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}.
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:
                service Foo {
                rpc Bar(google.protobuf.Empty) returns
                (google.protobuf.Empty);
                }
                The JSON representation for Empty is empty JSON
                object {}.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = cloud_deploy.DeleteDeliveryPipelineRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE: deletes are not retried by default (no default_retry), since
        # they are not idempotent.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_delivery_pipeline,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            empty_pb2.Empty,
            metadata_type=cloud_deploy.OperationMetadata,
        )
        # Done; return the response.
        return response
    async def list_targets(
        self,
        request: Optional[Union[cloud_deploy.ListTargetsRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListTargetsAsyncPager:
        r"""Lists Targets in a given project and location.
        .. code-block:: python
            from google.cloud import deploy_v1
            async def sample_list_targets():
                # Create a client
                client = deploy_v1.CloudDeployAsyncClient()
                # Initialize request argument(s)
                request = deploy_v1.ListTargetsRequest(
                    parent="parent_value",
                )
                # Make the request
                page_result = client.list_targets(request=request)
                # Handle the response
                async for response in page_result:
                    print(response)
        Args:
            request (Union[google.cloud.deploy_v1.types.ListTargetsRequest, dict]):
                The request object. The request object for
                `ListTargets`.
            parent (:class:`str`):
                Required. The parent, which owns this collection of
                targets. Format must be
                projects/{project_id}/locations/{location_name}.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.deploy_v1.services.cloud_deploy.pagers.ListTargetsAsyncPager:
                The response object from ListTargets.
                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = cloud_deploy.ListTargetsRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_targets,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListTargetsAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )
        # Done; return the response.
        return response
    async def get_target(
        self,
        request: Optional[Union[cloud_deploy.GetTargetRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> cloud_deploy.Target:
        r"""Gets details of a single Target.
        .. code-block:: python
            from google.cloud import deploy_v1
            async def sample_get_target():
                # Create a client
                client = deploy_v1.CloudDeployAsyncClient()
                # Initialize request argument(s)
                request = deploy_v1.GetTargetRequest(
                    name="name_value",
                )
                # Make the request
                response = await client.get_target(request=request)
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.deploy_v1.types.GetTargetRequest, dict]):
                The request object. The request object for `GetTarget`.
            name (:class:`str`):
                Required. Name of the ``Target``. Format must be
                projects/{project_id}/locations/{location_name}/targets/{target_name}.
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.deploy_v1.types.Target:
                A Target resource in the Google Cloud Deploy API.
                A Target defines a location to which a Skaffold
                configuration can be deployed.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = cloud_deploy.GetTargetRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_target,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Done; return the response.
        return response
async def create_target(
    self,
    request: Union[cloud_deploy.CreateTargetRequest, dict] = None,
    *,
    parent: str = None,
    target: cloud_deploy.Target = None,
    target_id: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
    r"""Creates a new Target in a given project and location.

    Args:
        request (Union[google.cloud.deploy_v1.types.CreateTargetRequest, dict]):
            The request object. The request object for
            ``CreateTarget``.
        parent (:class:`str`):
            Required. The parent collection in which the ``Target``
            should be created. Format should be
            projects/{project_id}/locations/{location_name}.
            Corresponds to the ``parent`` field on ``request``; must
            not be set if ``request`` is provided.
        target (:class:`google.cloud.deploy_v1.types.Target`):
            Required. The ``Target`` to create. Corresponds to the
            ``target`` field on ``request``; must not be set if
            ``request`` is provided.
        target_id (:class:`str`):
            Required. ID of the ``Target``. Corresponds to the
            ``target_id`` field on ``request``; must not be set if
            ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation_async.AsyncOperation:
            A long-running operation whose result is the created
            :class:`google.cloud.deploy_v1.types.Target` resource.
    """
    # Flattened keyword arguments are mutually exclusive with `request`.
    if request is not None and any([parent, target, target_id]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into a proper request proto, then copy
    # any flattened keyword arguments onto it.
    request = cloud_deploy.CreateTargetRequest(request)
    for field_name, field_value in (
        ("parent", parent),
        ("target", target),
        ("target_id", target_id),
    ):
        if field_value is not None:
            setattr(request, field_name, field_value)

    # Wrap the transport method to add default timeout and friendly
    # error handling.
    wrapped_call = gapic_v1.method_async.wrap_method(
        self._client._transport.create_target,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing information travels in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )
    metadata = tuple(metadata) + (routing_md,)

    # Issue the RPC and wrap the raw operation in an async future.
    raw_operation = await wrapped_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation_async.from_gapic(
        raw_operation,
        self._client._transport.operations_client,
        cloud_deploy.Target,
        metadata_type=cloud_deploy.OperationMetadata,
    )
async def update_target(
    self,
    request: Union[cloud_deploy.UpdateTargetRequest, dict] = None,
    *,
    target: cloud_deploy.Target = None,
    update_mask: field_mask_pb2.FieldMask = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
    r"""Updates the parameters of a single Target.

    Args:
        request (Union[google.cloud.deploy_v1.types.UpdateTargetRequest, dict]):
            The request object. The request object for
            ``UpdateTarget``.
        target (:class:`google.cloud.deploy_v1.types.Target`):
            Required. The ``Target`` to update. Corresponds to the
            ``target`` field on ``request``; must not be set if
            ``request`` is provided.
        update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
            Required. Field mask is used to specify the fields to be
            overwritten in the Target resource by the update. The
            fields specified in the update_mask are relative to the
            resource, not the full request. A field will be
            overwritten if it is in the mask. If the user does not
            provide a mask then all fields will be overwritten.
            Corresponds to the ``update_mask`` field on ``request``;
            must not be set if ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation_async.AsyncOperation:
            A long-running operation whose result is the updated
            :class:`google.cloud.deploy_v1.types.Target` resource.
    """
    # Flattened keyword arguments are mutually exclusive with `request`.
    if request is not None and any([target, update_mask]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into a request proto and apply any
    # flattened keyword arguments.
    request = cloud_deploy.UpdateTargetRequest(request)
    if target is not None:
        request.target = target
    if update_mask is not None:
        request.update_mask = update_mask

    # Wrap the transport method to add default timeout and friendly
    # error handling.
    wrapped_call = gapic_v1.method_async.wrap_method(
        self._client._transport.update_target,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing information travels in the metadata header. Note the
    # routing key here is the *nested* target resource name.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("target.name", request.target.name),)
    )
    metadata = tuple(metadata) + (routing_md,)

    # Issue the RPC and wrap the raw operation in an async future.
    raw_operation = await wrapped_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation_async.from_gapic(
        raw_operation,
        self._client._transport.operations_client,
        cloud_deploy.Target,
        metadata_type=cloud_deploy.OperationMetadata,
    )
async def delete_target(
    self,
    request: Union[cloud_deploy.DeleteTargetRequest, dict] = None,
    *,
    name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
    r"""Deletes a single Target.

    Args:
        request (Union[google.cloud.deploy_v1.types.DeleteTargetRequest, dict]):
            The request object. The request object for
            ``DeleteTarget``.
        name (:class:`str`):
            Required. The name of the ``Target`` to delete. Format
            should be
            projects/{project_id}/locations/{location_name}/targets/{target_name}.
            Corresponds to the ``name`` field on ``request``; must
            not be set if ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation_async.AsyncOperation:
            A long-running operation whose result type is
            :class:`google.protobuf.empty_pb2.Empty` (the delete
            produces no payload on success).
    """
    # Flattened keyword arguments are mutually exclusive with `request`.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into a request proto and apply the
    # flattened keyword argument, if given.
    request = cloud_deploy.DeleteTargetRequest(request)
    if name is not None:
        request.name = name

    # Wrap the transport method to add default timeout and friendly
    # error handling.
    wrapped_call = gapic_v1.method_async.wrap_method(
        self._client._transport.delete_target,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing information travels in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )
    metadata = tuple(metadata) + (routing_md,)

    # Issue the RPC and wrap the raw operation in an async future.
    raw_operation = await wrapped_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation_async.from_gapic(
        raw_operation,
        self._client._transport.operations_client,
        empty_pb2.Empty,
        metadata_type=cloud_deploy.OperationMetadata,
    )
async def list_releases(
    self,
    request: Union[cloud_deploy.ListReleasesRequest, dict] = None,
    *,
    parent: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListReleasesAsyncPager:
    r"""Lists Releases in a given project and location.

    Args:
        request (Union[google.cloud.deploy_v1.types.ListReleasesRequest, dict]):
            The request object. The request object for
            ``ListReleases``.
        parent (:class:`str`):
            Required. The ``DeliveryPipeline`` which owns this
            collection of ``Release`` objects. Corresponds to the
            ``parent`` field on ``request``; must not be set if
            ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.deploy_v1.services.cloud_deploy.pagers.ListReleasesAsyncPager:
            The response object from ListReleases. Iterating over it
            yields results and resolves additional pages
            automatically.
    """
    # Flattened keyword arguments are mutually exclusive with `request`.
    if request is not None and any([parent]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into a request proto and apply the
    # flattened keyword argument, if given.
    request = cloud_deploy.ListReleasesRequest(request)
    if parent is not None:
        request.parent = parent

    # Read-only RPC: retry transient unavailability by default.
    default_retry = retries.Retry(
        initial=1.0,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            core_exceptions.ServiceUnavailable,
        ),
        deadline=60.0,
    )
    wrapped_call = gapic_v1.method_async.wrap_method(
        self._client._transport.list_releases,
        default_retry=default_retry,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing information travels in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )
    metadata = tuple(metadata) + (routing_md,)

    # Issue the RPC, then wrap the first page in a pager so callers
    # can `async for` across all pages transparently.
    first_page = await wrapped_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return pagers.ListReleasesAsyncPager(
        method=wrapped_call,
        request=request,
        response=first_page,
        metadata=metadata,
    )
async def get_release(
    self,
    request: Union[cloud_deploy.GetReleaseRequest, dict] = None,
    *,
    name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> cloud_deploy.Release:
    r"""Gets details of a single Release.

    Args:
        request (Union[google.cloud.deploy_v1.types.GetReleaseRequest, dict]):
            The request object. The request object for ``GetRelease``.
        name (:class:`str`):
            Required. Name of the ``Release``. Format must be
            projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}/releases/{release_name}.
            Corresponds to the ``name`` field on ``request``; must
            not be set if ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.deploy_v1.types.Release:
            A Release resource in the Google Cloud Deploy API. A
            Release defines a specific Skaffold configuration
            instance that can be deployed.
    """
    # Flattened keyword arguments are mutually exclusive with `request`.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into a request proto and apply the
    # flattened keyword argument, if given.
    request = cloud_deploy.GetReleaseRequest(request)
    if name is not None:
        request.name = name

    # Read-only RPC: retry transient unavailability by default.
    default_retry = retries.Retry(
        initial=1.0,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            core_exceptions.ServiceUnavailable,
        ),
        deadline=60.0,
    )
    wrapped_call = gapic_v1.method_async.wrap_method(
        self._client._transport.get_release,
        default_retry=default_retry,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing information travels in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )
    metadata = tuple(metadata) + (routing_md,)

    # Issue the RPC and hand the proto straight back to the caller.
    return await wrapped_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def create_release(
    self,
    request: Union[cloud_deploy.CreateReleaseRequest, dict] = None,
    *,
    parent: str = None,
    release: cloud_deploy.Release = None,
    release_id: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
    r"""Creates a new Release in a given project and
    location.

    Args:
        request (Union[google.cloud.deploy_v1.types.CreateReleaseRequest, dict]):
            The request object. The request object for
            `CreateRelease`.
        parent (:class:`str`):
            Required. The parent collection in which the ``Release``
            should be created. Format should be
            projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}.
            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        release (:class:`google.cloud.deploy_v1.types.Release`):
            Required. The ``Release`` to create.
            This corresponds to the ``release`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        release_id (:class:`str`):
            Required. ID of the ``Release``.
            This corresponds to the ``release_id`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation_async.AsyncOperation:
            An object representing a long-running operation.

            The result type for the operation will be
            :class:`google.cloud.deploy_v1.types.Release` A Release
            resource in the Google Cloud Deploy API.

            A Release defines a specific Skaffold configuration
            instance that can be deployed.

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([parent, release, release_id])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    request = cloud_deploy.CreateReleaseRequest(request)

    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if parent is not None:
        request.parent = parent
    if release is not None:
        request.release = release
    if release_id is not None:
        request.release_id = release_id

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.create_release,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
    )

    # Send the request.
    response = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Wrap the response in an operation future so callers can await
    # the final Release resource.
    response = operation_async.from_gapic(
        response,
        self._client._transport.operations_client,
        cloud_deploy.Release,
        metadata_type=cloud_deploy.OperationMetadata,
    )

    # Done; return the response.
    return response
async def approve_rollout(
    self,
    request: Union[cloud_deploy.ApproveRolloutRequest, dict] = None,
    *,
    name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> cloud_deploy.ApproveRolloutResponse:
    r"""Approves a Rollout.

    Args:
        request (Union[google.cloud.deploy_v1.types.ApproveRolloutRequest, dict]):
            The request object. The request object used by
            ``ApproveRollout``.
        name (:class:`str`):
            Required. Name of the Rollout. Format is
            projects/{project}/locations/{location}/deliveryPipelines/{deliveryPipeline}/
            releases/{release}/rollouts/{rollout}. Corresponds to
            the ``name`` field on ``request``; must not be set if
            ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.deploy_v1.types.ApproveRolloutResponse:
            The response object from ApproveRollout.
    """
    # Flattened keyword arguments are mutually exclusive with `request`.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into a request proto and apply the
    # flattened keyword argument, if given.
    request = cloud_deploy.ApproveRolloutRequest(request)
    if name is not None:
        request.name = name

    # Wrap the transport method to add default timeout and friendly
    # error handling.
    wrapped_call = gapic_v1.method_async.wrap_method(
        self._client._transport.approve_rollout,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing information travels in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )
    metadata = tuple(metadata) + (routing_md,)

    # Issue the RPC and hand the proto straight back to the caller.
    return await wrapped_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def list_rollouts(
    self,
    request: Union[cloud_deploy.ListRolloutsRequest, dict] = None,
    *,
    parent: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListRolloutsAsyncPager:
    r"""Lists Rollouts in a given project and location.

    Args:
        request (Union[google.cloud.deploy_v1.types.ListRolloutsRequest, dict]):
            The request object. ListRolloutsRequest is the request
            object used by `ListRollouts`.
        parent (:class:`str`):
            Required. The ``Release`` which owns this collection of
            ``Rollout`` objects.

            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.deploy_v1.services.cloud_deploy.pagers.ListRolloutsAsyncPager:
            ListRolloutsResponse is the response object returned by
            ListRollouts.

            Iterating over this object will yield results and
            resolve additional pages automatically.

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([parent])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    request = cloud_deploy.ListRolloutsRequest(request)

    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if parent is not None:
        request.parent = parent

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling. Read-only RPC, so transient
    # unavailability is retried by default.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.list_rollouts,
        default_retry=retries.Retry(
            initial=1.0,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.ServiceUnavailable,
            ),
            deadline=60.0,
        ),
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
    )

    # Send the request.
    response = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # This method is paged; wrap the response in a pager, which provides
    # an `__aiter__` convenience method.
    response = pagers.ListRolloutsAsyncPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )

    # Done; return the response.
    return response
async def get_rollout(
    self,
    request: Union[cloud_deploy.GetRolloutRequest, dict] = None,
    *,
    name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> cloud_deploy.Rollout:
    r"""Gets details of a single Rollout.

    Args:
        request (Union[google.cloud.deploy_v1.types.GetRolloutRequest, dict]):
            The request object. GetRolloutRequest is the request
            object used by ``GetRollout``.
        name (:class:`str`):
            Required. Name of the ``Rollout``. Format must be
            projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}/releases/{release_name}/rollouts/{rollout_name}.
            Corresponds to the ``name`` field on ``request``; must
            not be set if ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.deploy_v1.types.Rollout:
            A Rollout resource in the Google Cloud Deploy API. A
            Rollout contains information around a specific
            deployment to a Target.
    """
    # Flattened keyword arguments are mutually exclusive with `request`.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into a request proto and apply the
    # flattened keyword argument, if given.
    request = cloud_deploy.GetRolloutRequest(request)
    if name is not None:
        request.name = name

    # Read-only RPC: retry transient unavailability by default.
    default_retry = retries.Retry(
        initial=1.0,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            core_exceptions.ServiceUnavailable,
        ),
        deadline=60.0,
    )
    wrapped_call = gapic_v1.method_async.wrap_method(
        self._client._transport.get_rollout,
        default_retry=default_retry,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing information travels in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )
    metadata = tuple(metadata) + (routing_md,)

    # Issue the RPC and hand the proto straight back to the caller.
    return await wrapped_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def create_rollout(
    self,
    request: Union[cloud_deploy.CreateRolloutRequest, dict] = None,
    *,
    parent: str = None,
    rollout: cloud_deploy.Rollout = None,
    rollout_id: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
    r"""Creates a new Rollout in a given project and
    location.

    Args:
        request (Union[google.cloud.deploy_v1.types.CreateRolloutRequest, dict]):
            The request object. CreateRolloutRequest is the request
            object used by ``CreateRollout``.
        parent (:class:`str`):
            Required. The parent collection in which the ``Rollout``
            should be created. Format should be
            projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}/releases/{release_name}.
            Corresponds to the ``parent`` field on ``request``; must
            not be set if ``request`` is provided.
        rollout (:class:`google.cloud.deploy_v1.types.Rollout`):
            Required. The ``Rollout`` to create. Corresponds to the
            ``rollout`` field on ``request``; must not be set if
            ``request`` is provided.
        rollout_id (:class:`str`):
            Required. ID of the ``Rollout``. Corresponds to the
            ``rollout_id`` field on ``request``; must not be set if
            ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation_async.AsyncOperation:
            A long-running operation whose result is the created
            :class:`google.cloud.deploy_v1.types.Rollout` resource.
    """
    # Flattened keyword arguments are mutually exclusive with `request`.
    if request is not None and any([parent, rollout, rollout_id]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into a request proto, then copy any
    # flattened keyword arguments onto it.
    request = cloud_deploy.CreateRolloutRequest(request)
    for field_name, field_value in (
        ("parent", parent),
        ("rollout", rollout),
        ("rollout_id", rollout_id),
    ):
        if field_value is not None:
            setattr(request, field_name, field_value)

    # Wrap the transport method to add default timeout and friendly
    # error handling.
    wrapped_call = gapic_v1.method_async.wrap_method(
        self._client._transport.create_rollout,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing information travels in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )
    metadata = tuple(metadata) + (routing_md,)

    # Issue the RPC and wrap the raw operation in an async future.
    raw_operation = await wrapped_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation_async.from_gapic(
        raw_operation,
        self._client._transport.operations_client,
        cloud_deploy.Rollout,
        metadata_type=cloud_deploy.OperationMetadata,
    )
    async def get_config(
        self,
        request: Union[cloud_deploy.GetConfigRequest, dict] = None,
        *,
        name: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> cloud_deploy.Config:
        r"""Gets the configuration for a location.
        .. code-block:: python
            from google.cloud import deploy_v1
            async def sample_get_config():
                # Create a client
                client = deploy_v1.CloudDeployAsyncClient()
                # Initialize request argument(s)
                request = deploy_v1.GetConfigRequest(
                    name="name_value",
                )
                # Make the request
                response = await client.get_config(request=request)
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.deploy_v1.types.GetConfigRequest, dict]):
                The request object. Request to get a configuration.
            name (:class:`str`):
                Required. Name of requested
                configuration.
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.deploy_v1.types.Config:
                Service-wide configuration.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = cloud_deploy.GetConfigRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # By default, transient ServiceUnavailable errors are retried
        # with exponential backoff (1s initial, x1.3, capped at 60s).
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_config,
            default_retry=retries.Retry(
                initial=1.0,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Done; return the response.
        return response
    async def __aenter__(self):
        # Support ``async with client`` usage; the client itself is the
        # context value.
        return self
    async def __aexit__(self, exc_type, exc, tb):
        # Close the underlying transport (gRPC channel) on context exit;
        # exceptions are not suppressed.
        await self.transport.close()
try:
    # Report the installed google-cloud-deploy version in client request
    # headers for telemetry.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-deploy",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from source); fall back
    # to a version-less ClientInfo.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("CloudDeployAsyncClient",)
|
murilobsd/unaerp
|
s1/pro_inte/servidor/cliente.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 <NAME>' <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import asyncio
import websockets
async def hello():
    """Open a websocket to the relay server and send a single "1"."""
    endereco = "ws://172.20.10.2:1234"
    async with websockets.connect(endereco) as conexao:
        await conexao.send("1")
# Run the one-shot client to completion.
# NOTE(review): asyncio.run(hello()) is the modern equivalent (3.7+).
asyncio.get_event_loop().run_until_complete(hello())
|
murilobsd/unaerp
|
s1/pro_inte/servidor/servidor.py
|
<filename>s1/pro_inte/servidor/servidor.py
#!/usr/bin/env python3
#
# Copyright (c) 2020 <NAME>' <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import asyncio
import websockets
USUARIOS = set()
async def register(websocket):
    # Track a newly connected client so future broadcasts reach it.
    USUARIOS.add(websocket)
async def unregister(websocket):
    """Forget a disconnected client.

    Uses ``set.discard`` instead of ``set.remove`` so a double
    unregister (or unregister of a never-registered socket) does not
    raise ``KeyError``.
    """
    USUARIOS.discard(websocket)
async def notificar_usuarios(msg, websocket):
    """Broadcast *msg* to every connected client except the sender.

    The original called ``asyncio.wait`` on a list comprehension, which
    raises ``ValueError`` when the only connected client is the sender
    (empty list), and passing bare coroutines to ``wait`` is deprecated
    since Python 3.8.  ``asyncio.gather`` with an explicit guard avoids
    both problems.
    """
    destinos = [u for u in USUARIOS if u != websocket]
    if destinos:
        await asyncio.gather(*(u.send(msg) for u in destinos))
async def hello(websocket, path):
    # Connection handler: register the client for its whole lifetime,
    # relay each incoming message to every other connected client, and
    # always unregister on disconnect (normal or error).
    await register(websocket)
    try:
        async for msg in websocket:
            await notificar_usuarios(msg, websocket)
    finally:
        await unregister(websocket)
def main(host, port):
    # Start the websocket relay on host:port and block forever.
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use
    # since Python 3.10; asyncio.run with ``websockets.serve`` as an
    # async context manager is the modern equivalent.
    start_server = websockets.serve(hello, host, port)
    asyncio.get_event_loop().run_until_complete(start_server)
    print("Start server on: %s:%d" % (host, port))
    asyncio.get_event_loop().run_forever()
if __name__ == '__main__':
    # Host/port are configurable via environment variables; the defaults
    # suit the original LAN demo setup.
    host = os.environ.get("HOST", "192.168.15.38")
    port = int(os.environ.get("PORT", 1234))
    main(host, port)
|
jrnp97/auth0-python-api-samples
|
00-Starter-Seed/server.py
|
"""Python Flask API Auth0 integration example
"""
from functools import wraps
import json
from os import environ as env
from typing import Dict
from six.moves.urllib.request import urlopen
from dotenv import load_dotenv, find_dotenv
from flask import Flask, request, jsonify, _request_ctx_stack, Response
from flask_cors import cross_origin
from jose import jwt
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
AUTH0_DOMAIN = env.get("AUTH0_DOMAIN")
API_IDENTIFIER = env.get("API_IDENTIFIER")
ALGORITHMS = ["RS256"]
APP = Flask(__name__)
# Format error response and append status code.
class AuthError(Exception):
    """
    An AuthError is raised whenever the authentication failed.

    Attributes:
        error: dict with "code" and "description" keys describing the failure.
        status_code: HTTP status code to return to the caller.
    """

    def __init__(self, error: Dict[str, str], status_code: int):
        # BUG FIX: the original nested a second ``def __init__`` inside
        # this one, so constructing an AuthError never assigned ``error``
        # or ``status_code`` (the error handler would then crash with
        # AttributeError).  Define the constructor exactly once.
        super().__init__()
        self.error = error
        self.status_code = status_code
@APP.errorhandler(AuthError)
def handle_auth_error(ex: AuthError) -> Response:
    """
    serializes the given AuthError as json and sets the response status code accordingly.
    :param ex: an auth error
    :return: json serialized ex response
    """
    # Flask routes any AuthError raised inside a request handler here,
    # turning it into a JSON body plus the error's HTTP status.
    response = jsonify(ex.error)
    response.status_code = ex.status_code
    return response
def get_token_auth_header() -> str:
    """Return the bearer token from the request's Authorization header.

    Raises:
        AuthError: 401 when the header is absent or not exactly of the
            form ``Bearer <token>``.
    """
    auth = request.headers.get("Authorization", None)
    if not auth:
        raise AuthError(
            {"code": "authorization_header_missing",
             "description": "Authorization header is expected"}, 401)
    parts = auth.split()
    if parts[0].lower() != "bearer":
        raise AuthError(
            {"code": "invalid_header",
             "description": "Authorization header must start with Bearer"}, 401)
    if len(parts) == 1:
        raise AuthError(
            {"code": "invalid_header",
             "description": "Token not found"}, 401)
    if len(parts) > 2:
        raise AuthError(
            {"code": "invalid_header",
             "description": "Authorization header must be Bearer token"}, 401)
    return parts[1]
def requires_scope(required_scope: str) -> bool:
    """Determines if the required scope is present in the access token
    Args:
        required_scope (str): The scope required to access the resource
    """
    claims = jwt.get_unverified_claims(get_token_auth_header())
    scope_claim = claims.get("scope")
    if scope_claim:
        # The "scope" claim is a space-separated list of granted scopes.
        return required_scope in scope_claim.split()
    return False
def requires_auth(func):
    """Determines if the access token is valid

    Decorator: verifies the RS256-signed JWT bearer token against the
    tenant's JWKS before calling the wrapped view; raises AuthError
    (401) on any validation failure.
    """
    @wraps(func)
    def decorated(*args, **kwargs):
        token = get_token_auth_header()
        # Fetch the tenant's JSON Web Key Set.
        # NOTE(review): this hits the network on every request; consider
        # caching the JWKS.
        jsonurl = urlopen("https://" + AUTH0_DOMAIN + "/.well-known/jwks.json")
        jwks = json.loads(jsonurl.read())
        try:
            unverified_header = jwt.get_unverified_header(token)
        except jwt.JWTError as jwt_error:
            raise AuthError({"code": "invalid_header",
                             "description":
                                 "Invalid header. "
                                 "Use an RS256 signed JWT Access Token"}, 401) from jwt_error
        # Reject symmetric-key tokens outright.
        if unverified_header["alg"] == "HS256":
            raise AuthError({"code": "invalid_header",
                             "description":
                                 "Invalid header. "
                                 "Use an RS256 signed JWT Access Token"}, 401)
        # Find the signing key whose key id matches the token's header.
        rsa_key = {}
        for key in jwks["keys"]:
            if key["kid"] == unverified_header["kid"]:
                rsa_key = {
                    "kty": key["kty"],
                    "kid": key["kid"],
                    "use": key["use"],
                    "n": key["n"],
                    "e": key["e"]
                }
        if rsa_key:
            try:
                # Full signature + audience/issuer claim validation.
                payload = jwt.decode(
                    token,
                    rsa_key,
                    algorithms=ALGORITHMS,
                    audience=API_IDENTIFIER,
                    issuer="https://" + AUTH0_DOMAIN + "/"
                )
            except jwt.ExpiredSignatureError as expired_sign_error:
                raise AuthError({"code": "token_expired",
                                 "description": "token is expired"}, 401) from expired_sign_error
            except jwt.JWTClaimsError as jwt_claims_error:
                raise AuthError({"code": "invalid_claims",
                                 "description":
                                     "incorrect claims,"
                                     " please check the audience and issuer"}, 401) from jwt_claims_error
            except Exception as exc:
                raise AuthError({"code": "invalid_header",
                                 "description":
                                     "Unable to parse authentication"
                                     " token."}, 401) from exc
            # Expose the validated claims to the view via the request
            # context stack.
            _request_ctx_stack.top.current_user = payload
            return func(*args, **kwargs)
        raise AuthError({"code": "invalid_header",
                         "description": "Unable to find appropriate key"}, 401)
    return decorated
# Controllers API
@APP.route("/api/public")
@cross_origin(headers=["Content-Type", "Authorization"])
def public():
    """Open endpoint: no access token required."""
    return jsonify(
        message="Hello from a public endpoint! You don't need to be authenticated to see this."
    )
@APP.route("/api/private")
@cross_origin(headers=["Content-Type", "Authorization"])
@cross_origin(headers=["Access-Control-Allow-Origin", "http://localhost:3000"])
@requires_auth
def private():
    """A valid access token is required to access this route
    """
    # NOTE(review): stacking two @cross_origin decorators and passing an
    # origin URL through ``headers=`` looks wrong — flask_cors expects
    # allowed origins via ``origins=``; confirm the intended CORS policy.
    response = "Hello from a private endpoint! You need to be authenticated to see this."
    return jsonify(message=response)
@APP.route("/api/private-scoped")
@cross_origin(headers=["Content-Type", "Authorization"])
@cross_origin(headers=["Access-Control-Allow-Origin", "http://localhost:3000"])
@requires_auth
def private_scoped():
    """A valid access token and an appropriate scope are required to access this route
    """
    # NOTE(review): the duplicated @cross_origin with an origin URL in
    # ``headers=`` mirrors /api/private — flask_cors expects ``origins=``;
    # confirm the intended CORS policy.
    if requires_scope("read:messages"):
        response = "Hello from a private endpoint! You need to be authenticated and have a scope of read:messages to see this."
        return jsonify(message=response)
    # Token valid but lacks the read:messages scope.
    raise AuthError({
        "code": "Unauthorized",
        "description": "You don't have access to this resource"
    }, 403)
if __name__ == "__main__":
    # PORT arrives from the environment as a string; Flask accepts it.
    APP.run(host="0.0.0.0", port=env.get("PORT", 3010))
|
SmellyGeekBoy/piface-led-flasher
|
ledflasher.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ledflasher.py
#
# Copyright 2014 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
def main():
    """Cycle the PiFace LEDs (green, yellow, red) with a buzzer pulse.

    The five input buttons select progressively faster flash speeds;
    the first pressed button (lowest pin number) wins each cycle.
    Runs forever; requires PiFace hardware.
    """
    from time import sleep
    import pifacedigitalio as p
    p.init()
    # (input pin, delay in seconds) — replaces the original copy-pasted
    # if/elif chain; first matching button wins, exactly as before.
    speed_buttons = [(0, 0.7), (1, 0.5), (2, 0.4), (3, 0.2), (4, 0.05)]
    leds = [0, 1, 2]  # output pins: green, yellow, red
    buzzer = 3        # output pin 3 drives the buzzer
    speed = 1
    while True:
        for pin, delay in speed_buttons:
            if p.digital_read(pin):
                speed = delay
                break
        print("Speed:" + str(speed))
        # Flash each LED in turn with a buzzer pulse of the same length,
        # replacing the original three copy-pasted on/sleep/off stanzas.
        for led in leds:
            p.digital_write(led, 1)     # LED on
            p.digital_write(buzzer, 1)  # buzzer on
            sleep(speed)
            p.digital_write(buzzer, 0)  # buzzer off
            p.digital_write(led, 0)     # LED off
    return 0
if __name__ == '__main__':
    # Entry point when run as a script (loops forever).
    main()
|
akasa9525/grouper
|
grouper/processLabels.py
|
<gh_stars>1-10
import os
from subprocess import call
import pandas as pd
from collections import defaultdict
import logging
#run BLAST and return list of label files in order [TS->ASdb, AS->TSdb]
#run BLAST and return list of label files in order [TS->ASdb, AS->TSdb]
def runBLAST(faFile1, faFile2, outdir, threads):
    """Build BLAST databases for both FASTA files and run reciprocal
    blastn searches.

    Args:
        faFile1: FASTA of the "TS" sequences.
        faFile2: FASTA of the "AS" sequences.
        outdir: directory receiving the databases and label files.
        threads: number of blastn threads.
    Returns:
        [labelFile1, labelFile2] — tabular (-outfmt 6) hit files in
        order [TS->ASdb, AS->TSdb].
    """
    logger = logging.getLogger("grouper")
    TSdb = os.path.sep.join([outdir, "TS", "db"])
    ASdb = os.path.sep.join([outdir, "AS", "db"])
    call(["makeblastdb", "-in", faFile1, "-dbtype", "nucl", "-out", TSdb])
    call(["makeblastdb", "-in", faFile2, "-dbtype", "nucl", "-out", ASdb])
    labelFile1 = os.path.sep.join([outdir, "TS.ASdb"])
    labelFile2 = os.path.sep.join([outdir, "AS.TSdb"])
    #run blastn for TS against AS
    call(["blastn", "-db", ASdb, "-query", faFile1, "-outfmt", "6", "-out", labelFile1, "-num_threads", str(threads)])
    call(["blastn", "-db", TSdb, "-query", faFile2, "-outfmt", "6", "-out", labelFile2, "-num_threads", str(threads)])
    # FIX: log through the module logger instead of the root logger
    # (the original created ``logger`` and then called ``logging.info``).
    logger.info("Done running BLAST using {} threads".format(threads))
    return [labelFile1, labelFile2]
#get the lengths of transcripts from one of the quant files
#get the lengths of transcripts from one of the quant files
def getLengths(expDict):
    """Read transcript lengths from a single quant.sf file.

    *expDict* maps condition -> {sample number -> sample directory};
    the sample keyed ``0`` of an arbitrary condition is used, since
    transcript lengths are identical across samples.

    Returns:
        dict mapping transcript name -> length.
    """
    logger = logging.getLogger("grouper")
    condition = list(expDict)[0]
    quantFile = os.path.sep.join([expDict[condition][0], "quant.sf"])
    # FIX: use the module logger, not the root logger; also drop the
    # dead ``lengths = {}`` initialisation the original overwrote.
    logger.info("Getting txp lengths from: {}".format(quantFile))
    lengths = pd.read_table(quantFile, names=["Name", "Length"],
                            usecols=["Name", "Length"])
    lengths.set_index("Name", inplace=True)
    return lengths.to_dict()['Length']
#read in the BLAST results and process them to get two way best match. This creates the seed file for Junto.
#Label files should be in order [TS->ASdb, AS->TSdb]
#read in the BLAST results and process them to get two way best match. This creates the seed file for Junto.
#Label files should be in order [TS->ASdb, AS->TSdb]
def genFinalLabels(expDict, keys, labelFiles, finalLabelFile, outdir):
    """Turn BLAST hits into the Junto seed file of (contig, label, prob).

    With a single label file every hit becomes a label with probability
    1.0.  With two (reciprocal searches) only two-way best matches are
    kept, with probability alignment_length / contig_length (capped at
    1.0).  The seed file is also copied to *finalLabelFile*.
    """
    lengths = getLengths(expDict)
    blastOut = open(keys["seed_file"], 'w')
    if (len(labelFiles)==1):
        with open(labelFiles[0].strip(), 'r') as f:
            # outfmt-6 columns 0/1 are query and subject ids.
            data = pd.read_table(f, header=None, usecols=[0,1], names=['query', 'subject'])
            table1 = data.set_index("query").to_dict()['subject']
        try:
            for key,value in table1.items():
                probability=1.0
                blastOut.write(key + "\t" + value + "\t" +str(probability)+ "\n")
        except KeyError:
            pass
    else:
        labelFiles[0] = labelFiles[0].strip()
        labelFiles[1] = labelFiles[1].strip()
        outfile1 = outdir + "best" + os.path.basename(labelFiles[0])
        outfile2 = outdir + "best" + os.path.basename(labelFiles[1])
        # Keep only the best hit per query: sort by bitscore (col 12,
        # desc) then e-value (col 11, asc), unique on the query column.
        # NOTE(review): shell=True with interpolated paths — fine for
        # trusted local paths, but not injection-safe.
        command = "cat " + labelFiles[0] + " | sort -k1,1 -k12,12nr -k11,11n | sort -u -k1,1 --merge > " + outfile1
        call(command, shell=True)
        command = "cat " + labelFiles[1] + " | sort -k2,2 -k12,12nr -k11,11n | sort -u -k2,2 --merge > " + outfile2
        call(command, shell=True)
        with open(outfile1, 'r') as f:
            # Best TS->AS hit per query; cols 6-7 are query align coords.
            table1 = {}
            lentable1 = {}
            for line in f:
                line = line.split()
                table1[line[0]] = line[1]
                lentable1[line[0]] = abs(float(line[6]) - float(line[7]))
        with open(outfile2, 'r') as f:
            # Best AS->TS hit per subject; cols 8-9 are subject coords.
            table2 = {}
            lentable2 = {}
            for line in f:
                line = line.split()
                table2[line[1]] = line[0]
                lentable2[line[1]] = abs(float(line[8]) - float(line[9]))
        call(["rm", outfile1])
        call(["rm", outfile2])
        try:
            # Emit only reciprocal best matches.
            for key,value in table1.items():
                if key in table2:
                    alignLen = max(lentable1[key], lentable2[key])
                    probability=min(1.0, (alignLen / float(lengths[key])))
                    blastOut.write(key + "\t" + value + "\t" +str(probability)+ "\n")
        except KeyError:
            pass
    blastOut.close()
    call(["cp", keys["seed_file"], finalLabelFile])
|
akasa9525/grouper
|
grouper/filtGraph.py
|
from __future__ import print_function
import os
import pandas as pd
import math
import logging
class EquivCollection(object):
    """Accumulates equivalence classes: tuples of transcript ids mapped
    to total read counts, plus the index-aligned transcript names."""

    def __init__(self):
        self.tnames = []        # transcript names, index-aligned with ids
        self.eqClasses = {}     # tuple(tids) -> accumulated read count
        self.hasNames = False   # True once setNames() has been called

    def setNames(self, names):
        """Record the transcript name list (done once per collection)."""
        self.tnames = names
        self.hasNames = True

    def add(self, tids, count):
        """Accumulate *count* reads for the class keyed by tuple *tids*."""
        self.eqClasses[tids] = self.eqClasses.get(tids, 0) + count
def readEqClass(eqfile, eqCollection):
    """Parse a salmon/sailfish eq_classes.txt file into *eqCollection*.

    File layout: #transcripts, #classes, one transcript name per line,
    then one class per line: ``<ntids> <tid...> <count>`` (tab
    separated).  Names are only recorded the first time; subsequent
    files skip the name block.

    FIX: ported the Python-2-only ``xrange``/lazy ``map`` to
    ``range``/``list(map(...))`` so the module also runs on Python 3,
    matching the sibling implementation in eqnet.py.
    """
    with open(eqfile) as ifile:
        numTran = int(ifile.readline().rstrip())
        numEq = int(ifile.readline().rstrip())
        if not eqCollection.hasNames:
            tnames = []
            for i in range(numTran):
                tnames.append(ifile.readline().rstrip())
            eqCollection.setNames(tnames)
        else:
            for i in range(numTran):
                ifile.readline()
        for i in range(numEq):
            toks = list(map(int, ifile.readline().rstrip().split('\t')))
            nt = toks[0]  # number of transcript ids (implicit in tids)
            tids = tuple(toks[1:-1])
            count = toks[-1]
            eqCollection.add(tids, count)
def getCountsFromEquiv(eqCollection):
    """Total ambiguous read count per transcript name.

    Every transcript receives a +1.0 pseudocount so downstream ratios
    never divide by zero.

    FIX: ``dict.iteritems()`` is Python 2 only; use ``items()`` (works
    on both), matching the sibling implementation in eqnet.py.
    """
    countDict = {}
    tn = eqCollection.tnames
    for tids, count in eqCollection.eqClasses.items():
        for t in tids:
            if tn[t] in countDict:
                countDict[tn[t]] += count
            else:
                countDict[tn[t]] = count
    # ensure no division by 0
    for t in eqCollection.tnames:
        if t in countDict:
            countDict[t] += 1.0
        else:
            countDict[t] = 1.0
    return countDict
def filter(expDict, netfile, ofile, auxDir):
    """Drop graph edges whose endpoints look differentially expressed.

    For every edge (x, y) a likelihood-ratio statistic D compares a
    shared expression ratio across conditions against independent
    ratios; edges with D <= 20 are kept and written to *ofile*.

    NOTE(review): this function shadows the builtin ``filter``, rebinds
    its ``ofile`` parameter to the opened file handle below, and is
    Python 2 only (``dict.iteritems``).
    """
    logger = logging.getLogger("grouper")
    # Get just the set of condition names
    conditions = expDict.keys()
    print("conditions = {}".format(conditions))
    eqClasses = {}
    for cond in conditions:
        print(expDict[cond])
        for sampNum, sampPath in expDict[cond].iteritems():
            if cond not in eqClasses:
                eqClasses[cond] = EquivCollection()
            eqPath = os.path.sep.join([sampPath, auxDir, "eq_classes.txt"])
            readEqClass(eqPath, eqClasses[cond])
    # Ambiguous read totals per transcript per condition (+1 pseudocount
    # guarantees the divisions below never hit zero).
    ambigCounts = {cond : getCountsFromEquiv(eqClasses[cond]) for cond in conditions}
    sailfish = {}
    for cond in conditions:
        sailfish[cond] = ambigCounts[cond]
    count = 0
    numTrimmed = 0
    with open(netfile) as f, open(ofile, 'w') as ofile:
        data = pd.read_table(f, header=None)
        for i in range(len(data)):
            count += 1
            print("\r{} done".format(count), end="")
            #Alternative hypo
            x = data[0][i]
            y = data[1][i]
            non_null=0
            x_all=0
            y_all=0
            for cond in conditions:
                y_g = sailfish[cond][y]
                x_g = sailfish[cond][x]
                r = y_g / x_g
                non_null += (y_g * math.log(r*x_g)) - (r*x_g)
                non_null += (x_g * math.log(x_g)) - x_g
                x_all += x_g
                y_all += y_g
            #null hypothesis
            null = 0
            r_all = y_all / x_all
            for cond in conditions:
                y_g = sailfish[cond][y]
                x_g = sailfish[cond][x]
                mean_x = (x_g + y_g) / (1+r_all)
                null += (y_g * math.log(r_all * mean_x)) - (r_all * mean_x)
                null += (x_g * math.log(mean_x)) - mean_x
            # Deviance of the independent-ratio model over the shared one.
            D = 2*(non_null-null)
            if D <= 20:
                ofile.write("{}\t{}\t{}\n".format(x, y, data[2][i]))
            else:
                numTrimmed += 1
    logging.info("Trimmed {} edges after label propagation.".format(numTrimmed))
|
akasa9525/grouper
|
grouper/iterLabel.py
|
<filename>grouper/iterLabel.py
import sys
import os
import argparse
import itertools
import statistics
import re
import csv
import fileinput
import numpy as np
from collections import defaultdict
from subprocess import call
import pandas as pd
from collections import Counter
alpha = 0.8 #ratio alloted to the probability value for labels
newEdgeProb = 0.9 #add a new edge if the shared label prob is greater than this
def getMedWeight(graph, node1, node2):
    """Median weight of the real edges incident to *node1* or *node2*.

    Self-loop marker weights (1.1) are excluded; returns 0 when neither
    node has any real edge.
    """
    weights = [
        w
        for node in (node1, node2)
        for (_, w) in graph[node]
        if w != 1.1
    ]
    if not weights:
        return 0
    return statistics.median(weights)
def readLabels(keys, iterNum):
    """Parse the seed/label file into three lookup structures.

    Returns ``(labelToContigs, contigToLabels, contigLabelsToProb)``:
    label -> set of contigs, contig -> set of labels, and
    (contig, label) -> propagation probability.

    For ``iterNum >= 1`` the file is Junto output with alternating
    label/probability pairs in the fourth tab column; for the initial
    iteration it is the simple three-column seed file
    (contig, label, probability).
    """
    labelToContigs = defaultdict(set)
    contigToLabels = defaultdict(set)
    contigLabelsToProb = {}
    with open(keys["seed_file"], 'r') as ifile:
        for line in ifile:
            data = (line.strip('\n')).split('\t')
            contigName = data[0]
            if (iterNum >= 1):
                data = data[3].split()
                curLabel = ""
                for i in range(1, len(data), 2):
                    curLabel = data[i-1]
                    # Skip placeholder/dummy labels and NaN probabilities.
                    if (curLabel != "" and curLabel != "__DUMMY__" and data[i].lower() != "nan" and curLabel != "IGNORE"):
                        contigToLabels[contigName].add(curLabel)
                        labelToContigs[curLabel].add(contigName)
                        contigLabelsToProb[(contigName, curLabel)] = float(data[i])
            else:
                curLabel = data[1]
                contigToLabels[contigName].add(curLabel)
                labelToContigs[curLabel].add(contigName)
                # BUG FIX: the original read ``float(data[i])`` here, but
                # ``i`` is undefined on this branch (NameError on the
                # first iteration); the probability is the third column.
                contigLabelsToProb[(contigName, curLabel)] = float(data[2])
    return(labelToContigs, contigToLabels, contigLabelsToProb)
#get the sum of the probabilities that 2 nodes have the same label
#@profile
def getProb(contigToLabels, contigLabelsToProb, node1, node2):
    """Probability mass that *node1* and *node2* carry the same label.

    Sums, over every label the two contigs share, the product of their
    individual label probabilities.
    """
    shared = contigToLabels[node1] & contigToLabels[node2]
    return sum(
        contigLabelsToProb[(node1, lbl)] * contigLabelsToProb[(node2, lbl)]
        for lbl in shared
    )
#@profile
#@profile
def changeEdgeWeights(orgGraph, graph, contigToLabels, contigLabelsToProb, ofile):
    """Rewrite every existing edge with a label-aware weight.

    For each non-self edge the new weight blends the original weight
    with the probability that the endpoints share a label:
    ``(1 - alpha) * original + alpha * prob``.  Self loops keep the
    sentinel weight 1.1.  Edges are written to *ofile*; returns
    ``(average written weight, number of edges written)``.

    FIX: ``dict.iteritems()`` is Python 2 only; use ``items()``.
    """
    changesMade = 0
    noChange = 0
    weightCalc = 0
    selfLoops = 0
    for (node1, node2), weight in graph.items():
        if node1 == node2:
            ofile.write(node1 + "\t" + node2 + "\t1.1\n")
            selfLoops += 1
        else:
            prob = getProb(contigToLabels, contigLabelsToProb, node1, node2)
            if prob > 0:
                # Recover the original weight from either adjacency set.
                # NOTE(review): if neither set contains the opposite
                # endpoint, orgWeight stays unbound and this raises —
                # presumably every graph edge also exists in orgGraph;
                # confirm.
                for (x, w) in orgGraph[node1]:
                    if x == node2:
                        orgWeight = w
                for (x, w) in orgGraph[node2]:
                    if x == node1:
                        orgWeight = w
                newWeight = ((1 - alpha) * orgWeight) + (prob * alpha)
                changesMade += 1
            else:
                newWeight = weight
                noChange += 1
            weightCalc += newWeight
            ofile.write(node1 + "\t" + node2 + "\t" + str(newWeight) + "\n")
    weightCalc /= (changesMade+noChange)
    return (weightCalc, (changesMade + noChange + selfLoops))
#@profile
def addNewEdges(orgGraph, graph, contigToLabels, labelToContigs, contigLabelsToProb, ofile):
changesMade = 0
weightCalc = 0
for label in labelToContigs:
for node1, node2 in itertools.combinations(labelToContigs[label], 2):
if node1 != node2:
if (node1, node2) not in graph and (node2, node1) not in graph:
totalProb = getProb(contigToLabels, contigLabelsToProb, node1, node2)
if (totalProb > newEdgeProb):
orgWeight = getMedWeight(orgGraph, node1, node2)
newWeight = ((1 - alpha) * orgWeight) + (totalProb * alpha)
ofile.write(node1 + "\t" + node2 + "\t" + str(newWeight) + "\n")
orgGraph[node1].add((node2, newWeight))
changesMade += 1
weightCalc += newWeight
if (changesMade > 0):
weightCalc /= changesMade
return (weightCalc, changesMade)
def run(keys, labelFile, juntoConfigFile, netFile, outdir):
    """Iterative label-propagation loop.

    Alternates running Junto label propagation with re-weighting (and
    adding) graph edges based on the propagated label probabilities,
    until the change in graph size between iterations falls below 5%
    of the previous change.  ``keys`` holds the Junto config entries
    (graph_file, seed_file, output_file, ...).

    NOTE(review): the ``labelFile`` parameter is unused, and
    ``oldGraphWeight`` below is assigned but never read.
    """
    if (netFile == keys["graph_file"]):
        print ("ERROR: junto graph file should be different from the net file otherwise it will be overwritten")
        return 0
    # Load the original graph as adjacency sets: node -> {(neighbor, weight)}.
    orgGraph = defaultdict(set)
    orgGraphSize = 0
    avgGraphWeight = 0
    with open(netFile, 'r') as ifile:
        for line in ifile:
            edge = (line.strip('\n')).split('\t')
            orgGraph[edge[0]].add((edge[1], float(edge[2])))
            orgGraphSize += 1
            if (float(edge[2]) != 1.1):
                avgGraphWeight += float(edge[2])
    avgGraphWeight /= orgGraphSize
    diff = 10000
    prevDiff = 0
    iters = 1
    i = 0
    # Seed the Junto graph file; self loops get a tiny weight.
    with open(keys["graph_file"], 'w') as f:
        for node1 in orgGraph:
            for node2, weight in orgGraph[node1]:
                if node1 == node2:
                    f.write(node1 + "\t" + node2 + "\t" + str(0.00001) + "\n")
                else:
                    f.write(node1 + "\t" + node2 + "\t" + str(weight) + "\n")
    #while (abs(prevDiff - diff) > (0.1*prevDiff)):
    while (diff > (0.05*prevDiff)):
        i += 1
        print ("Started iteration number: " + str(i) + "\n")
        if (i == 2):
            prevDiff = diff;
        # Rewrite the graph file, forcing self loops back to the tiny weight.
        tempfile = open("temp.txt", 'w')
        with open(keys["graph_file"], 'r') as ifile:
            for line in ifile:
                edge = (line.strip('\n')).split('\t')
                node1 = edge[0]
                node2 = edge[1]
                weight = float(edge[2])
                if node1 == node2:
                    tempfile.write(node1 + "\t" + node2 + "\t" + str(0.00001) + "\n")
                else:
                    tempfile.write(node1 + "\t" + node2 + "\t" + str(weight) + "\n")
        tempfile.close()
        call(["mv", "temp.txt", keys["graph_file"]])
        # One round of Junto; its output becomes the next seed file.
        call(["junto", "config", juntoConfigFile])
        call(["cp", keys["output_file"], keys["seed_file"]])
        print("Done running label propogation: processing results.")
        # Reload the (possibly grown) graph as an edge -> weight map;
        # self loops are marked with the sentinel 1.1.
        graph = {}
        graphSize = 0
        with open(keys["graph_file"], 'r') as ifile:
            for line in ifile:
                edge = (line.strip('\n')).split('\t')
                if edge[0] != edge[1]:
                    graph[(edge[0], edge[1])] = float(edge[2])
                else:
                    graph[(edge[0], edge[1])] = 1.1
                graphSize += 1
        labelToContigs, contigToLabels, contigLabelsToProb = readLabels(keys, i)
        print("Done reading labels.")
        sizeNewGraph = 0
        with open(keys["graph_file"], 'w') as ofile:
            #write previous edges with new weights
            (avgOldWeight, temp) = changeEdgeWeights(orgGraph, graph, contigToLabels, contigLabelsToProb, ofile)
            sizeNewGraph += temp
            #add new edges between contigs with same labels
            (avgNewWeight, temp) = addNewEdges(orgGraph, graph, contigToLabels, labelToContigs, contigLabelsToProb, ofile)
            sizeNewGraph += temp
        oldGraphWeight = avgGraphWeight
        avgGraphWeight = (avgOldWeight + avgNewWeight)/2
        # Rewrite the seed file, padding each contig's probability mass
        # up to 1 with an IGNORE label.
        with open(keys["seed_file"], 'w') as ofile:
            for contig in contigToLabels:
                curProb = 0
                for label in contigToLabels[contig]:
                    temp = contigLabelsToProb[(contig, label)]
                    ofile.write(contig + "\t" + label + "\t" + str(temp) + "\n")
                    curProb += temp
                if (curProb < 1):
                    ofile.write(contig + "\t" + "IGNORE" + "\t" + str(1-curProb) + "\n")
        diff = abs(sizeNewGraph - graphSize)
        print("Size diff: {}".format(diff))
        # Regenerate the Junto config for the next iteration.
        with open(juntoConfigFile, "w") as configFile:
            for x in keys:
                configFile.write(x + " = " + keys[x] + "\n")
            configFile.write("data_format = edge_factored\n")
            configFile.write("iters = " + str(iters) + "\n")
            configFile.write("prune_threshold = 0\n")
            configFile.write("algo = adsorption\n")
|
akasa9525/grouper
|
setup.py
|
from setuptools import setup
# Grouper package metadata.  The code base is Python 2 only (it uses
# xrange/iteritems/izip in several modules), hence the "<3.0"
# requirement below.
setup(name='biogrouper',
      python_requires="<3.0",
      version='0.1.3',
      scripts=['bin/Grouper'],
      description='Graph-based clustering and annotation for improved de novo transcriptome analysis',
      url='https://github.com/COMBINE-lab/Grouper',
      author='<NAME>, <NAME>, <NAME>',
      author_email='<EMAIL>',
      license='BSD with attribution',
      packages=['grouper'],
      install_requires=[
          'PyYAML',
          'coloredlogs',
          'click',
          'networkx==1.11',
          'numpy',
          'pandas',
          'tqdm',
          'statistics'
      ],
      zip_safe=False)
|
akasa9525/grouper
|
grouper/eqnet.py
|
from __future__ import print_function
import itertools
import pandas as pd
import numpy as np
import os
import logging
import json
import networkx as nx
import math
from tqdm import tqdm
def buildNetFile(sampdirs, netfile, cutoff, auxDir, status_file,writecomponents=False):
    """Build the transcript co-mapping graph from salmon/sailfish output.

    Reads quant.sf and eq_classes.txt from every sample directory,
    connects transcripts that share equivalence classes, weights edges
    by shared ambiguous reads, prunes edges whose endpoints have too
    few reads, and writes the edge list to *netfile* (optionally
    connected components to a .clust file).  Progress is recorded in
    the JSON *status_file*.  Python 2 only (izip/xrange/iteritems).
    """
    logger = logging.getLogger("grouper")
    sep = os.path.sep
    sffiles = [sep.join([sd, 'quant.sf']) for sd in sampdirs]
    eqfiles = [sep.join([sd, auxDir, '/eq_classes.txt']) for sd in sampdirs]
    tnames = []
    weightDict = {}
    diagCounts = None
    sumCounts = None
    ambigCounts = None
    firstSamp = True
    numSamp = 0
    tot = 0
    eqClasses = {}
    for sffile, eqfile in itertools.izip(sffiles, eqfiles):
        quant = pd.read_table(sffile)
        quant.set_index('Name', inplace=True)
        with open(eqfile) as ifile:
            numSamp += 1
            numTran = int(ifile.readline().rstrip())
            numEq = int(ifile.readline().rstrip())
            logging.info("quant file: {}; eq file: {}; # tran = {}; # eq = {}".format(sffile, eqfile, numTran, numEq))
            if firstSamp:
                # The first sample defines the transcript name order.
                for i in xrange(numTran):
                    tnames.append(ifile.readline().rstrip())
                diagCounts = np.zeros(len(tnames))
                sumCounts = np.zeros(len(tnames))
                ambigCounts = np.zeros(len(tnames))
            else:
                for i in xrange(numTran):
                    ifile.readline()
            # for easy access to quantities of interest
            tpm = quant.loc[tnames, 'TPM'].values
            estCount = quant.loc[tnames, 'NumReads'].values
            efflens = quant.loc[tnames, 'EffectiveLength'].values
            epsilon =  np.finfo(float).eps
            sumCounts = np.maximum(sumCounts, estCount)
            for i in xrange(numEq):
                toks = map(int, ifile.readline().rstrip().split('\t'))
                nt = toks[0]
                tids = tuple(toks[1:-1])
                count = toks[-1]
                if tids in eqClasses:
                    eqClasses[tids] += count
                else:
                    eqClasses[tids] = count
                # Add the contribution to the graph
                denom = sum([tpm[t] for t in tids])
                for t1, t2 in itertools.combinations(tids,2):
                    w = count
                    key = (t1, t2)
                    if key in weightDict:
                        weightDict[key] += w
                    else:
                        weightDict[key] = w
                for t in tids:
                    diagCounts[t] += count * (tpm[t] / denom)
                    # NOTE(review): in the source this ``firstSamp``
                    # reset sits between the two per-transcript
                    # accumulations, inside the eq-class loop — it was
                    # probably intended once per sample; confirm against
                    # upstream Grouper.
                    firstSamp = False
                    ambigCounts[t] += count
    lens = quant.loc[tnames, 'Length'].values
    maxWeight = 0.0
    minWeight = 0.0
    prior = 0.1
    edgesToRemove = []
    ##
    # Go through the weightMap and remove any edges that
    # have endpoints with too few mapping reads
    ##
    for k,v in weightDict.iteritems():
        c0, c1 = diagCounts[k[0]], diagCounts[k[1]]
        a0, a1 = ambigCounts[k[0]], ambigCounts[k[1]]
        if a0 + a1 > epsilon and a0 > cutoff and a1 > cutoff:
            # Normalise the shared count by the smaller ambiguous total.
            w = (v+prior) / min((a0+prior), (a1+prior))
            if w > minWeight:
                weightDict[k] = w
                if w > maxWeight:
                    maxWeight = w
            else:
                edgesToRemove.append(k)
        else:
            edgesToRemove.append(k)
    # Actually delete those edges
    for e in edgesToRemove:
        del weightDict[e]
    def nearEnd(tup):
        # True when position *pos* lies within ~100bp of either end of
        # transcript *txp* (10bp overhang tolerance).
        txp = tup[0]
        pos = tup[1]
        moverhang = 10
        ml = 100
        if pos < -moverhang or pos > lens[txp] + moverhang:
            return False
        elif pos <= ml or pos >= lens[txp] - ml:
            return True
        else:
            return False
    # Keep only transcripts with enough ambiguous reads; give survivors
    # a self loop with the sentinel weight 1.1.
    tnamesFilt = []
    relabel = {}
    for i in xrange(len(estCount)):
        if (ambigCounts[i] > cutoff):
            relabel[i] = len(tnamesFilt)
            tnamesFilt.append(tnames[i])
            weightDict[(i, i)] = 1.1
    G = nx.Graph() if writecomponents else None
    with open(netfile, 'w') as ofile:
        writeEdgeList(weightDict, tnames, ofile, G)
    if G is not None:
        clustFile = netfile.split('.net')[0] + '.clust'
        print("Writing connected components as clusters to {}".format(clustFile))
        with open(clustFile, 'w') as ofile:
            cc = nx.connected_component_subgraphs(G)
            for c in cc:
                ofile.write('{}\n'.format('\t'.join(c.nodes())))
    # Mark this stage as complete in the JSON status file.
    print("started status_file")
    dict_ = {"eqnet.buildNetFile" : True}
    with open(status_file, 'r') as temp:
        data = json.load(temp)
    data.update(dict_)
    with open(status_file, 'w') as temp:
        json.dump(data, temp)
    print("updated status_file")
def writeEdgeList(weightDict, tnames, ofile, G):
    """Dump edges as ``name1<TAB>name2<TAB>weight`` lines to *ofile*,
    mirroring each edge into the networkx graph *G* when one is given."""
    for (i, j), w in weightDict.items():
        ofile.write("{}\t{}\t{}\n".format(tnames[i], tnames[j], w))
        if G is not None:
            G.add_edge(tnames[i], tnames[j])
def writePajek(weightDict, tnames, relabel, ofile):
    """Write the weighted graph in Pajek .net format to *ofile*.

    BUG FIX: the original referenced the undefined names ``netfile``
    and ``tnamesFilt`` and rebound its ``ofile`` parameter, so every
    call raised NameError.  It now writes to the *ofile* handle it is
    given and enumerates the *tnames* parameter; ``relabel`` maps a
    weightDict transcript id to its vertex index.
    """
    ofile.write("*Vertices\t{}\n".format(len(tnames)))
    for i, n in enumerate(tnames):
        ofile.write("{}\t\"{}\"\n".format(i, n))
    ofile.write("*Edges\n")
    print("There are {} edges\n".format(len(weightDict)))
    for k, v in weightDict.items():
        ofile.write("{}\t{}\t{}\n".format(relabel[k[0]], relabel[k[1]], v))
class EquivCollection(object):
    """Equivalence-class accumulator: maps tuples of transcript ids to
    total read counts, alongside the index-aligned transcript names."""

    def __init__(self):
        self.tnames = []        # transcript names, index-aligned with ids
        self.eqClasses = {}     # tuple(tids) -> accumulated read count
        self.hasNames = False   # True once setNames() has run

    def setNames(self, names):
        """Store the transcript name list (called once per collection)."""
        self.tnames = names
        self.hasNames = True

    def add(self, tids, count):
        """Accumulate *count* reads under the class keyed by *tids*."""
        self.eqClasses[tids] = self.eqClasses.get(tids, 0) + count
def readEqClass(eqfile, eqCollection):
    """Load an eq_classes.txt file into *eqCollection*.

    Layout: #transcripts, #classes, one name per line, then one class
    per line: ``<ntids> <tid...> <count>`` (tab separated).  Names are
    recorded only if the collection has none yet.
    """
    with open(eqfile) as ifile:
        numTran = int(ifile.readline().rstrip())
        numEq = int(ifile.readline().rstrip())
        print("file: {}; # tran = {}; # eq = {}".format(eqfile, numTran, numEq))
        if eqCollection.hasNames:
            # Names already known; skip over the name block.
            for _ in range(numTran):
                ifile.readline()
        else:
            eqCollection.setNames(
                [ifile.readline().rstrip() for _ in range(numTran)])
        for _ in range(numEq):
            toks = [int(tok) for tok in ifile.readline().rstrip().split('\t')]
            # toks[0] is the id count; ids are toks[1:-1], count is last.
            eqCollection.add(tuple(toks[1:-1]), toks[-1])
def getCountsFromEquiv(eqCollection):
    """Total ambiguous read count per transcript name, with a +1.0
    pseudocount so downstream ratios never divide by zero."""
    tn = eqCollection.tnames
    countDict = {}
    for tids, count in eqCollection.eqClasses.items():
        for t in tids:
            name = tn[t]
            countDict[name] = countDict.get(name, 0) + count
    # ensure no division by 0
    for name in tn:
        countDict[name] = countDict.get(name, 0) + 1.0
    return countDict
def flattenClusters(infile, outfile):
    """Rewrite one-cluster-per-line input as (clusterN, member) pairs.

    Line *i* of *infile* (whitespace-separated members) produces one
    ``clusterI<TAB>member`` output line per member.
    """
    with open(infile) as ifile, open(outfile, 'w') as ofile:
        for idx, line in enumerate(ifile):
            cname = "cluster{}".format(idx)
            for member in line.rstrip().split():
                ofile.write("{}\t{}\n".format(cname, member))
def filterGraph(expDict, netfile, outfile, auxDir, mincut):
    """Filter network edges via a likelihood-ratio test on expression.

    For every edge (x, y) in `netfile`, the statistic D compares a model
    with a separate y/x expression ratio per condition against one with a
    single pooled ratio; edges with D > 20 are removed (directly, or via a
    minimum s-t cut when `mincut` is set).  Surviving edges are written to
    `outfile`.

    Args:
        expDict (dict): condition -> {sample number -> sample path}.
        netfile (str): input edge list (tab separated: u, v, weight).
        outfile (str): path for the filtered edge list.
        auxDir (str): auxiliary directory name inside each sample path.
        mincut (bool): remove edges via minimum cut instead of directly.
    """
    logger = logging.getLogger("grouper")
    # Get just the set of condition names
    conditions = expDict.keys()
    logging.info("conditions = {}".format(conditions))
    eqClasses = {}
    for cond in conditions:
        print(expDict[cond])
        for sampNum, sampPath in expDict[cond].items():
            if cond not in eqClasses:
                eqClasses[cond] = EquivCollection()
            # NOTE(review): the leading "/" in "/eq_classes.txt" combined with
            # sep.join yields a doubled separator ("aux//eq_classes.txt");
            # harmless on POSIX but wrong on Windows — confirm.
            eqPath = os.path.sep.join([sampPath, auxDir, "/eq_classes.txt"])
            readEqClass(eqPath, eqClasses[cond])
    # Per-condition read totals per transcript (getCountsFromEquiv adds a
    # +1.0 pseudocount, so the divisions below cannot hit zero).
    ambigCounts = {cond : getCountsFromEquiv(eqClasses[cond]) for cond in conditions}
    sailfish = {}
    for cond in conditions:
        sailfish[cond] = ambigCounts[cond]
    # Load the full graph with edge capacities for the optional min-cut.
    G = nx.Graph()
    with open(netfile) as f:
        data = pd.read_table(f, header=None)
        for i in range(len(data)):
            G.add_edge(data[0][i], data[1][i], capacity = float(data[2][i]))
    logging.info("Done reading files for filtering.")
    count = 0
    numTrimmed = 0
    numCut = 0
    cutset = set()
    with open(netfile) as f, open(outfile, 'w') as ofile:
        data = pd.read_table(f, header=None)
        #for i in tqdm(range(len(data))):
        for i in range(len(data)):
            count += 1
            print("\r{} edges checked".format(count), end="")
            #Alternative hypo
            # Log-likelihood with a separate ratio r = y/x per condition.
            x = data[0][i]
            y = data[1][i]
            non_null=0
            x_all=0
            y_all=0
            for cond in conditions:
                y_g = sailfish[cond][y]
                x_g = sailfish[cond][x]
                r = y_g / x_g
                non_null += (y_g * math.log(r*x_g)) - (r*x_g)
                non_null += (x_g * math.log(x_g)) - x_g
                x_all += x_g
                y_all += y_g
            #null hypothesis
            # Log-likelihood with one pooled ratio r_all across conditions.
            null = 0
            r_all = y_all / x_all
            for cond in conditions:
                y_g = sailfish[cond][y]
                x_g = sailfish[cond][x]
                mean_x = (x_g + y_g) / (1+r_all)
                null += (y_g * math.log(r_all * mean_x)) - (r_all * mean_x)
                null += (x_g * math.log(mean_x)) - mean_x
            # Likelihood-ratio statistic; D > 20 marks the edge for removal.
            D = 2*(non_null-null)
            if mincut:
                if D > 20 and x != y:
                    s = G.subgraph(nx.shortest_path(G,x))
                    value, partition = nx.minimum_cut(s, x, y)
                    if value < 10:
                        reachable, non_reachable = partition
                        # NOTE(review): Graph.edges_iter is networkx 1.x API
                        # (removed in 2.x), while minimum_cut's partition return
                        # is 2.x style — confirm the pinned networkx version.
                        for e in s.edges_iter(data='capacity'):
                            if (e[0] in reachable and e[1] in non_reachable) or (e[0] in non_reachable and e[1] in reachable):
                                cutset.add((e[0], e[1]))
                                numCut += 1
                        numTrimmed += 1
            else:
                if D > 20 and x != y:
                    numTrimmed += 1
                    cutset.add((x, y))
        G.remove_edges_from(list(cutset))
        # Emit the surviving edges with their capacities.
        for e in G.edges(data = "capacity"):
            ofile.write(e[0] + "\t" + e[1] + "\t" + str(e[2]) + "\n")
    logging.info("Trimmed {} edges".format(numTrimmed))
    logging.info("Cut performed on {} edges".format(numCut))
def addOrphanLinks(sampdirs, auxDir, orphanFileName, cutoff, netFileIn, netFileOut):
    """
    Augment the network file with edges inferred from orphan read links.

    Orphaned read pairs whose two ends map near the ends of two different
    transcripts with similar TPM are taken as evidence the transcripts
    belong together; such pairs are appended as weighted edges.

    Args:
        sampdirs (list): sample directories containing quant.sf files.
        auxDir (str): name of the auxiliary output directory per sample.
        orphanFileName (str): per-sample orphan-link file name.
        cutoff (float): minimum ambiguous read count for a transcript.
        netFileIn (str): existing network file to extend.
        netFileOut (str): path for the augmented network file.

    Returns:
        dict: the (i, j) -> weight edges that were appended.
    """
    logger = logging.getLogger("grouper")
    sep = os.path.sep
    sffiles = [sep.join([sd, 'quant.sf']) for sd in sampdirs]
    eqfiles = [sep.join([sd, auxDir, '/eq_classes.txt']) for sd in sampdirs]

    tnames = []
    weightDict = {}
    diagCounts = None
    sumCounts = None
    ambigCounts = None
    firstSamp = True
    numSamp = 0
    eqClasses = {}
    # BUG FIX: itertools.izip is Python 2 only; this file already relies on
    # Python 3 (print(..., end="")), so use the builtin zip.
    for sffile, eqfile in zip(sffiles, eqfiles):
        quant = pd.read_table(sffile)
        quant.set_index('Name', inplace=True)
        with open(eqfile) as ifile:
            numSamp += 1
            numTran = int(ifile.readline().rstrip())
            numEq = int(ifile.readline().rstrip())
            if firstSamp:
                for i in range(numTran):
                    tnames.append(ifile.readline().rstrip())
                diagCounts = np.zeros(len(tnames))
                sumCounts = np.zeros(len(tnames))
                ambigCounts = np.zeros(len(tnames))
            else:
                for i in range(numTran):
                    ifile.readline()
            # for easy access to quantities of interest
            tpm = quant.loc[tnames, 'TPM'].values
            estCount = quant.loc[tnames, 'NumReads'].values
            efflens = quant.loc[tnames, 'EffectiveLength'].values
            epsilon = np.finfo(float).eps
            sumCounts = np.maximum(sumCounts, estCount)
            for i in range(numEq):
                # BUG FIX: map() is lazy in Python 3; materialize before indexing.
                toks = list(map(int, ifile.readline().rstrip().split('\t')))
                nt = toks[0]
                tids = tuple(toks[1:-1])
                count = toks[-1]
                if tids in eqClasses:
                    eqClasses[tids] += count
                else:
                    eqClasses[tids] = count
                # Distribute the class count over members, TPM-weighted.
                denom = sum([tpm[t] for t in tids])
                for t in tids:
                    diagCounts[t] += count * (tpm[t] / denom)
                    ambigCounts[t] += count
        firstSamp = False
    lens = quant.loc[tnames, 'Length'].values
    logging.info("Done reading files for adding orphans")

    # Considering Orphan reads
    def nearEnd(tup):
        """True when a (txp, pos) mapping lies within ml of either end."""
        txp = tup[0]
        pos = tup[1]
        moverhang = 10
        ml = 100
        if pos < -moverhang or pos > lens[txp] + moverhang:
            return False
        elif pos <= ml or pos >= lens[txp] - ml:
            return True
        else:
            return False

    vertices = set()
    with open(netFileIn, 'r') as ifile:
        for line in ifile:
            line = line.split()
            vertices.add(line[0])
            vertices.add(line[1])

    count = 0
    seenOrphan = {}
    orphanDict = {}
    orphanLinkFiles = [sep.join([sd, auxDir, orphanFileName]) for sd in sampdirs]
    haveLinkFiles = all(os.path.isfile(f) for f in orphanLinkFiles)
    numOrphanLinks = 0
    if haveLinkFiles:
        for olfile in orphanLinkFiles:
            for l in open(olfile):
                left, right = l.rstrip().split(':')
                # BUG FIX: materialize the inner map() objects — nearEnd and the
                # product loop below index them, which fails on py3 map objects.
                lp = [list(map(int, i.split(','))) for i in left.rstrip('\t').split('\t')]
                rp = [list(map(int, i.split(','))) for i in right.split('\t')]
                lp = [t for t in filter(nearEnd, lp)]
                rp = [t for t in filter(nearEnd, rp)]
                #if len(lp) == 1 or len(rp) == 1:
                for a, b in itertools.product(lp, rp):
                    ltpm = tpm[a[0]] + 10 ** -11  # Laplacian Smoothing
                    rtpm = tpm[b[0]] + 10 ** -11
                    tpm_ratio = 1 - (abs(ltpm - rtpm) / (ltpm + rtpm))
                    read_dist = lens[a[0]] - a[1] + b[1]
                    if tpm_ratio >= .5:  # and read_dist <= 300 and tpm[a[0]] > .5 and tpm[b[0]] > .5:
                        a = a[0]; b = b[0]
                        key = (a, b) if a < b else (b, a)
                        if ambigCounts[a] < cutoff or ambigCounts[b] < cutoff:
                            continue
                        c0, c1 = diagCounts[a], diagCounts[b]
                        a0, a1 = ambigCounts[a], ambigCounts[b]
                        if a not in seenOrphan:
                            seenOrphan[a] = set()
                        if b not in seenOrphan:
                            seenOrphan[b] = set()
                        seenOrphan[a].add(b)
                        seenOrphan[b].add(a)
                        if key not in orphanDict:
                            orphanDict[key] = 1.0 / min(a0, a1)
                        else:
                            orphanDict[key] += 1.0 / min(a0, a1)

        # BUG FIX: dict.iteritems() is Python 2 only; use items().
        for key, value in orphanDict.items():
            # Only link transcripts that are not promiscuously connected.
            if len(seenOrphan[key[0]]) < 3 and len(seenOrphan[key[1]]) < 3:
                # NOTE(review): a0/a1 here are stale leftovers from the loop
                # above; the accumulated `value` looks intended — confirm before
                # changing the numeric behavior.
                if key not in weightDict:
                    numOrphanLinks += 1
                    weightDict[key] = 1.0 / min(a0, a1)
                else:
                    weightDict[key] += 1.0 / min(a0, a1)

    logging.info("Added {} orphan link edges".format(numOrphanLinks))
    with open(netFileIn, 'r') as ifile, open(netFileOut, 'w') as ofile:
        for line in ifile:
            ofile.write(line)
        for k, v in weightDict.items():
            ofile.write("{}\t{}\t{}\n".format(tnames[k[0]], tnames[k[1]], v))
    return weightDict
|
Ansud/supres_founder
|
source/structures/intervals.py
|
"""
Intervals storage.
There is array of intervals, sorted by start.
The main functionality - detect is point belongs to any of them or not.
"""
class Intervals:
    """
    Intervals storage.

    Holds a list of (start, end) intervals which become sorted and merged
    after normalize().  The main functionality is detecting whether a
    point belongs to any interval.
    """
    def __init__(self):
        self.intervals = list()

    def add(self, start: float, end: float):
        """Append an interval; call normalize() once after all adds."""
        # Let simplify my life and call normalize manually
        self.intervals.append((start, end))

    def normalize(self):
        """Sort intervals by start and merge overlapping ones."""
        # BUG FIX: guard the empty case — next() on an empty iterator raised
        # StopIteration here.
        if not self.intervals:
            return
        # Sort intervals by first point
        self.intervals = sorted(self.intervals, key=lambda x: x[0])
        intervals_iter = iter(self.intervals)
        current = next(intervals_iter)
        # Merge them to another list
        out = list()
        # The tuple can't be modified, thus make it list and convert later back
        out.append([current[0], current[1]])
        current_end = out[0][1]
        for item in intervals_iter:
            if item[0] <= current_end:
                if item[1] <= current_end:
                    continue
                out[-1][1] = item[1]
                # BUG FIX: the merge frontier must advance when an interval is
                # extended, otherwise later overlapping intervals are appended
                # as separate entries (e.g. (0,10),(5,20),(15,30) -> (0,20),(15,30)).
                current_end = item[1]
            else:
                out.append([item[0], item[1]])
                current_end = item[1]
        # Convert back to tuples
        self.intervals = [tuple(x) for x in out]

    def hit(self, point: float):
        """Binary-search the normalized intervals for `point`."""
        # BUG FIX: an empty interval list previously raised IndexError.
        if not self.intervals:
            return False
        # Run through intervals list to find the point
        start = 0
        end = len(self.intervals)
        while True:
            length = (end - start) // 2
            position = start + length
            current = self.intervals[position]
            # Point lies in this interval
            if current[0] <= point <= current[1]:
                return True
            if start == end or not length:
                return False
            # Narrow the search window
            if point > current[1]:
                start = position
            elif point < current[0]:
                end = position

    def miss(self, point: float):
        """Inverse of hit()."""
        return not self.hit(point)

    # TODO: Create tests folder and move to unit tests
    @staticmethod
    def test_normalize():
        values = [(0, 1), (2, 3), (4, 5), (5, 6), (10, 15), (13, 25), (100, 1000), (50, 60), (70, 80), (55, 57)]
        expected = [(0, 1), (2, 3), (4, 6), (10, 25), (50, 60), (70, 80), (100, 1000)]
        interval = Intervals()
        for v in values:
            interval.add(v[0], v[1])
        interval.normalize()
        assert len(expected) == len(interval.intervals)
        for i in range(len(expected)):
            assert expected[i] == interval.intervals[i]

    @staticmethod
    def test_hit():
        test_data = [
            (-1, False), (100000, False), (9, False), (93, False),
            (0.001, True), (4.5, True), (999, True), (80, True), (0, True), (1000, True), (86, True), (51, True),
        ]
        interval = Intervals()
        for v in [(0, 1), (4, 5), (6, 8), (10, 25), (50, 60), (70, 80), (85, 90), (100, 1000)]:
            interval.add(v[0], v[1])
        interval.normalize()
        for item in test_data:
            print('Test {0} -> {1}'.format(item[0], item[1]))
            assert interval.hit(item[0]) == item[1]
|
Ansud/supres_founder
|
find_levels.py
|
<filename>find_levels.py
"""
Main executable file used to start project
"""
import asyncio
from source.core import run_project
def main():
    """Entry point: drive the async level-finding pipeline to completion."""
    asyncio.run(run_project())


if __name__ == "__main__":
    main()
|
Ansud/supres_founder
|
source/parser/__init__.py
|
"""
Python project
"""
from .csv import parse_csv
|
Ansud/supres_founder
|
source/parse_arguments.py
|
"""
Argument specifications and parsing
"""
import argparse
class ArgumentParser:
    """Thin wrapper around argparse exposing convenience properties."""

    def __init__(self):
        root = argparse.ArgumentParser()
        commands = root.add_subparsers(help='Commands help')
        root.add_argument("--price-sorted", help="Sort levels by price instead of count", action='store_true')
        root.add_argument("--threshold", type=int, help="Level kick count threshold", default=5)
        root.add_argument(
            "--price-fuzz", type=float,
            help="Price fuzz. I.e. count prices in some neighbourhood of level",
            default=0
        )
        csv_cmd = commands.add_parser('csv', help='Load data from CSV files')
        csv_cmd.add_argument("--intraday", type=str, help="CSV file with OHLC intraday bar values")
        csv_cmd.add_argument("--daily", type=str, help="CSV file with OHLC bar values in daily timeframe")
        csv_cmd.add_argument("--ohlc-positions", type=str, help="CSV OHLC comma separated positions", default='0, 1, 2, 3')
        fetch_cmd = commands.add_parser('fetch', help='Fetch data from internet sources')
        fetch_cmd.add_argument("--ticker", type=str, help="Equity ticker (download only)")
        self.arguments = root.parse_args()

    @property
    def ohlc_positions(self):
        """OHLC column indices parsed from the comma-separated option."""
        return [int(pos) for pos in self.arguments.ohlc_positions.split(',')]

    @property
    def ticker(self):
        """Upper-cased ticker symbol, or the falsy value as given."""
        symbol = self.arguments.ticker
        return symbol.upper() if symbol else symbol

    @property
    def csv_mode(self):
        # The 'intraday' attribute only exists when the csv subcommand ran.
        return 'intraday' in self.arguments

    @property
    def fetch_mode(self):
        # The 'ticker' attribute only exists when the fetch subcommand ran.
        return 'ticker' in self.arguments

    def __getattr__(self, item):
        # Fall back to the parsed namespace for any unknown attribute.
        return getattr(self.arguments, item)
|
Ansud/supres_founder
|
source/core.py
|
"""
Entry point for all operations
"""
import asyncio
from .analyzer import filter_daily_bars, filter_data, find_levels
from .downloader import download_daily_data, download_intraday_data
from .parse_arguments import ArgumentParser
from .parser import parse_csv
async def return_empty():
    """Coroutine placeholder yielding an empty result list."""
    return []
def load_data(arguments: ArgumentParser):
    """
    :param arguments: program arguments
    :return: coroutines tuple that will return two lists of OHLC data, first element used in filtering
    """
    if arguments.csv_mode:
        positions = arguments.ohlc_positions
        daily_coro = (
            parse_csv(arguments.arguments.daily, positions=positions)
            if arguments.daily is not None
            else return_empty()
        )
        return daily_coro, parse_csv(arguments.arguments.intraday, positions=positions)
    if arguments.fetch_mode:
        return download_daily_data(arguments.ticker), download_intraday_data(arguments.ticker)
    # Neither subcommand given: nothing to load.
    return return_empty(), return_empty()
async def run_project():
    """Load, filter and analyze OHLC data, then print the found levels."""
    arguments = ArgumentParser()
    daily_coro, intraday_coro = load_data(arguments)
    daily, intraday = await asyncio.gather(daily_coro, intraday_coro)
    daily = filter_daily_bars(daily)
    filtered = filter_data(daily, intraday, arguments.price_fuzz)
    levels = find_levels(filtered, arguments.threshold, arguments.price_sorted)
    print('We found following levels:')
    if not levels:
        print('No any....')
        return
    for level in levels:
        print('Level price: {0}\tcount {1}'.format(level[0], level[1]))
|
Ansud/supres_founder
|
source/downloader/__init__.py
|
"""
Python project
"""
from .base import download_daily_data, download_intraday_data
|
Ansud/supres_founder
|
source/parser/csv.py
|
<filename>source/parser/csv.py<gh_stars>1-10
"""
Parse CSV file to list of OHLC tuples
"""
import csv
from typing import Optional
from source.structures import OHLCData
async def parse_csv(
        file_name: str,
        delimiter: Optional[str] = ',',
        positions: Optional[list] = None
):
    """
    Parse a CSV file into a list of OHLC prices.

    Declared async for interface uniformity with the downloaders, but the
    file I/O itself is blocking.

    :param file_name: File to read from
    :param delimiter: CSV fields delimiter
    :param positions: OHLC positions in CSV lines
    :return: list of OHLCData
    """
    parsed = list()
    with open(file_name, 'r') as source:
        rows = csv.reader(source, delimiter=delimiter)
        header_candidate = next(rows)
        # The first row may be a header; OHLCData raises ValueError on
        # non-numeric fields, in which case that single row is skipped.
        try:
            parsed.append(OHLCData(*[header_candidate[x] for x in positions]))
        except ValueError:
            pass
        for row in rows:
            parsed.append(OHLCData(*[row[x] for x in positions]))
    return parsed
|
Ansud/supres_founder
|
source/analyzer/finder.py
|
<reponame>Ansud/supres_founder
"""
Process data to find levels
"""
def inc_count(prices, level, bottom):
    """Record one kick of `level`, from below (bottom=True) or above."""
    bucket = prices.setdefault(level, dict(b=0, t=0))
    bucket['b' if bottom else 't'] += 1


def find_levels(data: list, threshold: int, price_sorted: bool):
    """
    Find levels
    :param data: list of OHLCData
    :param threshold: do not return levels with kick count less than it
    :param price_sorted: sort levels by price instead of kick count
    :return: list of price levels sorted by count (or price)
    """
    prices = dict()
    for bar in data:
        bullish = bar.open <= bar.close
        # The body's bottom price is hit from above, its top from below.
        inc_count(prices, bar.open, bottom=not bullish)
        inc_count(prices, bar.close, bottom=bullish)
        # High hits a level from bottom only
        inc_count(prices, bar.high, bottom=True)
        # Low hits a level from top only
        inc_count(prices, bar.low, bottom=False)
    # Linearize, dropping levels kicked fewer than `threshold` times.
    levels = [
        (price, tb['t'] + tb['b'])
        for price, tb in prices.items()
        if tb['t'] + tb['b'] >= threshold
    ]
    key = 0 if price_sorted else 1
    return sorted(levels, key=lambda x: x[key], reverse=True)
|
Ansud/supres_founder
|
source/structures/ohlc.py
|
"""
Data storage class
TODO: Add ability to analyse bar type
"""
class OHLCData:
    """Single bar of Open/High/Low/Close prices, stored as floats.

    TODO: Add ability to analyse bar type
    """

    def __init__(self, open, high, low, close):
        # Coerce up front so downstream math is float-only; raises
        # ValueError on non-numeric input (used by parse_csv to skip headers).
        self.open, self.high, self.low, self.close = (
            float(open), float(high), float(low), float(close)
        )

    def linearize(self):
        """All four prices as a tuple, in OHLC order."""
        return self.open, self.high, self.low, self.close

    def shadows(self):
        """Only the extremes (high, low) of the bar."""
        return self.high, self.low
|
Ansud/supres_founder
|
source/structures/__init__.py
|
<reponame>Ansud/supres_founder
"""
Python project
"""
from .ohlc import OHLCData
|
Ansud/supres_founder
|
source/downloader/base.py
|
<filename>source/downloader/base.py
"""
Grab data from internet
May be i will support more sources, but currently only alphavantage is supported,
thus this file a bit ridiculous
"""
from .alphavantage import get_daily_data, get_intraday_data
async def download_daily_data(ticker: str):
    """Fetch daily-timeframe OHLC data for `ticker`, with progress prints."""
    print('Start downloading daily data for {0}...'.format(ticker))
    bars = await get_daily_data(ticker)
    print('Complete downloading daily data for {0}...'.format(ticker))
    return bars


async def download_intraday_data(ticker: str):
    """Fetch intraday OHLC data for `ticker`, with progress prints."""
    print('Start downloading intraday data for {0}...'.format(ticker))
    bars = await get_intraday_data(ticker)
    print('Complete downloading intraday data for {0}...'.format(ticker))
    return bars
|
Ansud/supres_founder
|
source/downloader/alphavantage.py
|
<reponame>Ansud/supres_founder<filename>source/downloader/alphavantage.py
"""
Grab data from alphavantage
"""
import json
import urllib.parse
import urllib.request
from datetime import datetime
import aiohttp
from source.structures import OHLCData
# Base alphavantage settings
BASE_URL = 'https://www.alphavantage.co/query'
FUNC_INTRADAY = 'TIME_SERIES_INTRADAY'
FUNC_DAILY = 'TIME_SERIES_DAILY'
# This key is free and can be obtained from alphavantage site
# TODO: make it 'demo' and load from some file or settings.yml
ACCESS_KEY = '<KEY>'
# Keys in returned JSON
KEY_OPEN = '1. open'
KEY_CLOSE = '4. close'
KEY_HIGH = '2. high'
KEY_LOW = '3. low'
# Intraday supported time periods: 1min, 5min, 15min, 30min, 60min
TIME_1MIN = '1min'
TIME_5MIN = '5min'
TIME_15MIN = '15min'
TIME_30MIN = '30min'
TIME_60MIN = '60min'
TIME_DAILY = 'Daily'
SELECTED_TIME = TIME_5MIN
def get_time_series_key(time: str):
    """Build the JSON key alphavantage uses for the given resolution."""
    return f'Time Series ({time})'
async def get_data(ticker: str, function: str):
    """Query alphavantage and return the decoded JSON payload."""
    query = dict(
        function=function,
        symbol=ticker.upper(),
        apikey=ACCESS_KEY,
        outputsize='full',
    )
    # Only intraday requests take a bar interval parameter.
    if function == FUNC_INTRADAY:
        query['interval'] = SELECTED_TIME
    url = BASE_URL + '?' + urllib.parse.urlencode(query)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return json.loads(await response.text())
async def get_parsed_data(ticker: str, function: str):
    """
    Download and parse OHLC data for `ticker`.

    Extract values from JSON of the form:
        "Time Series (5min)": {
            "2019-07-18 16:00:00": {
                "1. open": "1145.9500",
                "2. high": "1147.5100",
                "3. low": "1145.9500",
                "4. close": "1146.5601",
                "5. volume": "36550"
            },
        }

    Entries older than one year are discarded.

    :param ticker: equity ticker symbol
    :param function: alphavantage function (FUNC_DAILY or FUNC_INTRADAY)
    :return: list of OHLCData (empty on a malformed server response)
    """
    raw_data = await get_data(ticker, function)
    if function == FUNC_DAILY:
        series_key = TIME_DAILY
        time_format = '%Y-%m-%d'
    else:
        series_key = SELECTED_TIME
        time_format = '%Y-%m-%d %H:%M:%S'
    data = raw_data.get(get_time_series_key(series_key))
    out = list()
    now = datetime.now()
    if not data:
        # BUG FIX: previously only warned and then crashed on data.items()
        # (AttributeError on None); return the empty list instead.
        print('Weird response from server, no data found.\n{0}\n'.format(raw_data))
        return out
    for key, item in data.items():
        date = datetime.strptime(key, time_format)
        # I'm interested in last year only
        if (now - date).days > 365:
            continue
        out.append(OHLCData(
            item[KEY_OPEN], item[KEY_HIGH], item[KEY_LOW], item[KEY_CLOSE]
        ))
    return out
async def get_daily_data(ticker: str):
    """Daily-resolution OHLC data for `ticker`."""
    return await get_parsed_data(ticker, FUNC_DAILY)


async def get_intraday_data(ticker: str):
    """Intraday-resolution OHLC data for `ticker`."""
    return await get_parsed_data(ticker, FUNC_INTRADAY)
|
Ansud/supres_founder
|
source/analyzer/__init__.py
|
"""
Python project
"""
from .finder import find_levels
from .filter import filter_daily_bars, filter_data
|
Ansud/supres_founder
|
source/analyzer/filter.py
|
<gh_stars>1-10
"""
Process daily and intraday data to filter out wrong levels
"""
from source.structures.intervals import Intervals
def filter_daily_bars(daily: list):
    """
    Remove ordinary bars and leave only bars with high shadow.

    Currently a placeholder: the input passes through untouched.
    """
    return daily
def filter_data(daily: list, intraday: list, fuzz: float):
    """
    The idea is simple: need to get all OHLC prices from daily basis and cleanup intraday
    prices to remove all of them which are not located near daily prices with some fuzz.
    :param daily: daily OHLC data
    :param intraday: intraday OHLC data
    :param fuzz: price level neighbourhood
    :return: filtered OHLC data
    """
    # Without daily data there is nothing to filter against.
    if not daily:
        return intraday
    allowed = Intervals()
    for price in (p for ohlc in daily for p in ohlc.shadows()):
        allowed.add(price - fuzz, price + fuzz)
    allowed.normalize()
    # Keep a bar if any of its four prices lands inside an allowed band.
    kept = [
        ohlc for ohlc in intraday
        if any(allowed.hit(price) for price in ohlc.linearize())
    ]
    print('Filtering complete:\n\tRemoved\t{0}\n\tRemained\t{1}\n\tOverall\t{2}'.format(
        len(intraday) - len(kept), len(kept), len(intraday)
    ))
    return kept
|
moemoe89/simple-api-flask
|
src/models/__init__.py
|
<reponame>moemoe89/simple-api-flask<gh_stars>0
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
bcrypt = Bcrypt()
db = SQLAlchemy()
from .UserModel import UserModel, UserSchema
|
moemoe89/simple-api-flask
|
src/models/UserModel.py
|
<filename>src/models/UserModel.py
import datetime
from marshmallow import fields, Schema
from . import db, bcrypt
class UserModel(db.Model):
    """SQLAlchemy model for an application user; passwords stored bcrypt-hashed."""
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    email = db.Column(db.String(128), unique=True, nullable=False)
    password = db.Column(db.String(128), nullable=True)
    phone = db.Column(db.String(128), nullable=False)
    address = db.Column(db.Text, nullable=False)
    created_at = db.Column(db.DateTime)
    updated_at = db.Column(db.DateTime)

    def __init__(self, data):
        """Populate a new user from a dict; the password is stored hashed."""
        self.name = data.get('name')
        self.email = data.get('email')
        # BUG FIX: restore the hash call (the original line was corrupted by
        # a placeholder) so plaintext is never persisted.
        self.password = self.__generate_hash(data.get('password'))
        self.phone = data.get('phone')
        self.address = data.get('address')
        self.created_at = datetime.datetime.utcnow()
        self.updated_at = datetime.datetime.utcnow()

    def save(self):
        """Insert this row and commit."""
        db.session.add(self)
        db.session.commit()

    def update(self, data):
        """Apply the given fields and commit; passwords are re-hashed."""
        for key, item in data.items():
            if key == 'password':
                # BUG FIX: the original referenced an undefined `value` and
                # then fell through to setattr, overwriting the hash with
                # the plaintext password.
                self.password = self.__generate_hash(item)
            else:
                setattr(self, key, item)
        self.updated_at = datetime.datetime.utcnow()
        db.session.commit()

    def delete(self):
        """Delete this row and commit."""
        db.session.delete(self)
        db.session.commit()

    @staticmethod
    def get_all_users():
        """Return every user row."""
        return UserModel.query.all()

    @staticmethod
    def get_one_user(id):
        """Return the user with the given primary key, or None."""
        return UserModel.query.get(id)

    def __repr__(self):
        # BUG FIX: was misspelled `__repr`, so it never took effect.
        return '<id {}>'.format(self.id)

    def __generate_hash(self, password):
        """Bcrypt-hash the given plaintext password."""
        return bcrypt.generate_password_hash(password, rounds=10).decode("utf-8")

    def check_hash(self, password):
        """Check plaintext `password` against the stored bcrypt hash."""
        return bcrypt.check_password_hash(self.password, password)
class UserSchema(Schema):
    """Marshmallow schema validating and serializing user payloads."""
    id = fields.Int(dump_only=True)  # assigned by the database, never accepted on input
    name = fields.Str(required=True)
    email = fields.Email(required=True)
    password = fields.Str(required=True)
    phone = fields.Str(required=True)
    address = fields.Str(required=True)
    created_at = fields.DateTime(dump_only=True)  # server-managed timestamps
    updated_at = fields.DateTime(dump_only=True)
|
moemoe89/simple-api-flask
|
src/app.py
|
<filename>src/app.py
from flask import Flask
from .config import app_config
from .models import db, bcrypt
def create_app(env_name):
    """Application factory: build and configure a Flask app for `env_name`.

    Args:
        env_name: key into app_config selecting the configuration object.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__)
    app.config.from_object(app_config[env_name])
    # Bind the shared extension instances to this app.
    bcrypt.init_app(app)
    db.init_app(app)
    # Minimal health-check endpoint.
    @app.route('/', methods=['GET'])
    def index():
        return 'Pong'
    return app
|
jadermcs/ZeCarioca
|
preprocess.py
|
import os
import glob
import tqdm
import random
import pandas as pd
# Clean up to 100k raw thread TSVs into message/reply parquet batches.
path = "clear_threads/"
files = glob.glob(path + "*_*.tsv")
random.seed(42)
random.shuffle(files)
frames = []
counter = 0
for filein in tqdm.tqdm(files[:100000]):
    counter += 1
    df = pd.read_csv(filein, delimiter='\t', quoting=3, header=None,
                     names=["timestamp", "id", "text"])
    df.drop(columns=['timestamp'], inplace=True)
    # Pair each message with the following one as its reply.
    df['reply'] = df['text'].shift(-1)
    df = df.iloc[:-1]
    # BUG FIX: `|` binds tighter than `<`, so the unparenthesized original
    # evaluated `text.str.len() < (3 | reply.str.len()) < 3`, a chained
    # Series comparison that raises ValueError.
    df.drop(df[(df.text.str.len() < 3) | (df.reply.str.len() < 3)].index,
            inplace=True)
    # NOTE(review): this character class also matches whitespace, so any
    # multi-word message is dropped — confirm that is intended.
    df.drop(df[df.text.str.contains(r'[^0-9a-zA-Z\.\,\?\!]') |
               df.reply.str.contains(r'[^0-9a-zA-Z\.\,\?\!]')].index,
            inplace=True)
    os.remove(filein)  # source file is consumed once cleaned
    frames.append(df)
    # Flush accumulated frames to parquet every 1000 files.
    if counter % 1000 == 0:
        out = pd.concat(frames, axis=0, ignore_index=True)
        out.dropna(axis=0, inplace=True)
        frames = []
        out.to_parquet(path + str(counter) + '.parquet')
        del out
|
jadermcs/ZeCarioca
|
finetune.py
|
<reponame>jadermcs/ZeCarioca<gh_stars>0
import json
import tqdm
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from transformers import Trainer, TrainingArguments
from datasets import load_dataset, load_metric
checkpoint = "models/adrenaline_multiwoz/epoch56_trloss0.40_gpt2"
# checkpoint = "pierreguillou/gpt2-small-portuguese"

# Special tokens come from the dialog ontology produced by processdialog.py.
with open("data/ontology.json") as fin:
    tokens = json.load(fin)

tokenizer = GPT2Tokenizer.from_pretrained(checkpoint)
model = GPT2LMHeadModel.from_pretrained(checkpoint)
tokenizer.add_special_tokens({'additional_special_tokens': tokens})
tokenizer.save_pretrained("models/tokenizer/")
# Embedding table must grow to cover the newly added special tokens.
model.resize_token_embeddings(len(tokenizer))

datasets = load_dataset("json", data_files={"train": "data/process.train.json",
                                            "valid": "data/process.valid.json"})
tokenizer.pad_token = tokenizer.eos_token


def add_tokens(examples):
    """Tokenize to a fixed length; labels mirror input_ids for causal LM."""
    res = tokenizer(examples['text'], max_length=512, truncation=True,
                    padding='max_length')
    res['labels'] = res['input_ids'].copy()
    return res


tokenized = datasets.map(
    add_tokens,
    num_proc=4,
    remove_columns=["id", "text"])


def compute_metrics(pred):
    """Token-level accuracy over greedy predictions (currently unused)."""
    # NOTE(review): accuracy_score is never imported (sklearn); this function
    # is disabled below but would NameError if re-enabled.
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    acc = accuracy_score(labels, preds)
    return {'accuracy': acc}


training_args = TrainingArguments(
    "test-clm",
    evaluation_strategy="epoch",
    per_device_train_batch_size=2,
    gradient_accumulation_steps=32,
    learning_rate=2e-5,
    weight_decay=0.01,
    num_train_epochs=100,
    report_to="wandb",
    run_name=checkpoint,
    save_strategy="epoch"
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["valid"],
    # compute_metrics=compute_metrics,
)
trainer.train()
eval_results = trainer.evaluate()
# BUG FIX: eval_loss is a Python float; torch.exp requires a tensor.
print(f"Perplexity: {torch.exp(torch.tensor(eval_results['eval_loss'])):.2f}")
|
jadermcs/ZeCarioca
|
train.py
|
<reponame>jadermcs/ZeCarioca
import os
import math
import tqdm
import glob
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from transformers import Trainer, TrainingArguments
from datasets import load_dataset, load_metric
files = "clear_threads/*.parquet"
checkpoint = "pierreguillou/gpt2-small-portuguese"
BLOCK_SIZE = 128

tokenizer = GPT2Tokenizer.from_pretrained(checkpoint)
model = GPT2LMHeadModel.from_pretrained(checkpoint)

train_files, validation_files = train_test_split(glob.glob(files)[:2],
                                                 test_size=0.1)
datasets = load_dataset("parquet", data_files={"train": train_files,
                                               "validation": validation_files})
# Keep only complete, reasonably sized message/reply pairs.
datasets = datasets.filter(lambda x: x['text'] is not None and x['reply'] is
                           not None and len(x['text'] + x['reply']) < 10000)
# BUG FIX: `datasets.files(...)` is not a Dataset method (AttributeError);
# this second pass must be another .filter() keeping sentence-terminated pairs.
datasets = datasets.filter(lambda x: x['text'].endswith((".", "?", "!")) and
                           x['reply'].endswith((".", "?", "!")))

special_tokens = ["<sos_u>", "<eos_u>", "<sos_r>", "<eos_r>"]
tokenizer.add_special_tokens({'additional_special_tokens': special_tokens})
model.resize_token_embeddings(len(tokenizer))


def tokenize_function(examples):
    """Wrap question/reply in role markers and tokenize the concatenation."""
    # NOTE(review): this concatenates strings but is mapped with batched=True,
    # which passes lists — confirm against the pinned `datasets` version.
    question = "<sos_u> " + examples["text"] + " <eos_u>"
    answer = "<sos_r> " + examples["reply"] + " <eos_r>"
    return tokenizer(question + answer)


# BUG FIX: `parsed_datasets` was never defined; map over `datasets`.
tokenized_datasets = datasets.map(
    tokenize_function,
    num_proc=8,
    batched=True,
    remove_columns=["id", "text", "reply"])
# remove_columns=["id", "text", "reply", "__index_level_0__"])


def group_texts(examples):
    """Concatenate tokenized samples and re-split into BLOCK_SIZE chunks."""
    concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated_examples[list(examples.keys())[0]])
    # Drop the remainder so every block is exactly BLOCK_SIZE tokens.
    total_length = (total_length // BLOCK_SIZE) * BLOCK_SIZE
    result = {
        k: [t[i: i + BLOCK_SIZE] for i in range(0, total_length, BLOCK_SIZE)]
        for k, t in concatenated_examples.items()}
    result["labels"] = result["input_ids"].copy()
    return result


lm_datasets = tokenized_datasets.map(
    group_texts,
    batched=True,
    num_proc=4)

training_args = TrainingArguments(
    "test-clm",
    evaluation_strategy="epoch",
    per_device_train_batch_size=8,
    learning_rate=2e-5,
    weight_decay=0.01,
    warmup_steps=2000,
    num_train_epochs=20,
    report_to="wandb",
    save_strategy="epoch"
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=lm_datasets["train"],
    eval_dataset=lm_datasets["validation"],
)
trainer.train()
eval_results = trainer.evaluate()
print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
|
jadermcs/ZeCarioca
|
processdialog.py
|
<reponame>jadermcs/ZeCarioca
import json
import tqdm
import random
from transformers import GPT2Tokenizer, GPT2LMHeadModel
random.seed(42)

with open("synthetic.augmented.json") as fin:
    data = json.load(fin)

# Special tokens: ontology intents/actions plus the segment markers.
tokens = data['ontology']['intents'] + data['ontology']['actions'] + ["<sos_u>", "<sos_b>", "<sos_a>", "<sos_r>", "<eos_u>", "<eos_b>", "<eos_a>", "<eos_r>"]

dialogues = []
for d in tqdm.tqdm(data['dialogs']):
    dialog = ''
    # Turns alternate user/system; walk them in pairs.
    for turn in range(len(d['turns']) // 2):
        t = d['turns'][turn * 2:turn * 2 + 2]
        utterance = f"<sos_u> {t[0]['utterance'].lower()} <eos_u>"
        intents = []
        # Flatten slot-values into alternating slot/value tokens.
        for slot in t[0]['slot-values']:
            if isinstance(t[0]['slot-values'][slot], list):
                parse = [[slot, v] for v in t[0]['slot-values'][slot]]
                intents += [item for sublist in parse for item in sublist]
            else:
                intents += [slot, t[0]['slot-values'][slot]]
        bs = [t[0]['intent']] + intents
        belief = "<sos_b> " + " ".join(bs).lower() + " <eos_b>"
        action = "<sos_a> " + t[1]['action'] + " <eos_a>"
        response = f"<sos_r> {t[1]['utterance_delex']} <eos_r>"
        dialog += utterance + belief + action + response
    dialogues.append({'id': d['id'], 'text': dialog})

random.shuffle(dialogues)

# BUG FIX: all output files are now closed via context managers (the
# ontology file handle was previously never closed).
with open("data/ontology.json", "w") as f3:
    json.dump(tokens, f3)

c1, c2 = 0, 0
with open("data/process.train.json", "w") as f1, \
        open("data/process.valid.json", "w") as f2:
    for i, line in enumerate(dialogues):
        # Dialogues whose id ends in 1/2/3 form the validation split.
        if not line['id'].endswith(("1", "2", "3")):
            print(json.dumps(line), file=f1)
            c1 += 1
        else:
            print(json.dumps(line), file=f2)
            c2 += 1
print("train size:", c1, "test size:", c2)
|
Pebaz/RGB
|
src/main.py
|
<filename>src/main.py
"""
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This program has multiple stages:
1. Take a given input header(s) and produce formatted:
- Output.txt
2. Parse this file to obtain only the parts Red/System will care about.
3. Generate a corresponding Red/System import "header" file that can be used
in a Red program by simply importing the "header" file.
"""
import os # Basename
import fire # CLI framework
import format # Launching the formatter
from rgb import RGB # RGB compiler class
class CLI:
    """
    Red Generator of Bindings's command line interface.
    """
    def gen(self, header, llvm_dir, dynlib=None, out_file=None,
            call_con='cdecl', include_dir='.', debug=False):
        """
        Generate a Red/System binding file from the given header input.

        Args:
            header(str): the header to parse.
            llvm_dir(str): the binary directory where LLVM lives
            dynlib(str): the dynamic library target, used in the outfile
            out_file(str): the path/name.ext of the output file
            call_con(str): the calling convention of the library
            include_dir(str): where other include files are, same as C
            debug(bool): whether verbose debugging should occur
        """
        # Clean up the header file and obtain all declarations/pound defines
        declarations = format.format_header(header, llvm_dir, include_dir)
        # Fix null dynamic lib name
        # IDIOM FIX: identity test against the None singleton, not equality.
        if dynlib is None:
            # Since the extension doesn't matter, just add a dot to it
            dynlib = os.path.basename(header).split('.')[0] + '.'
        # Both parse and generate the declarations
        rgb_compiler = RGB(declarations, dynlib, call_con, out_file)
        rgb_compiler.compile()


if __name__ == '__main__':
    fire.Fire(CLI)
|
Pebaz/RGB
|
src/compilers/pound_define.py
|
<filename>src/compilers/pound_define.py
"""
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyparsing import * # Best parsing library for Python
from red_utils import * # Tools for parsing C/Red/System code
from parse_utils import * # Tools for parsing in general
# Configuration Variables for testing
RGB_ENABLE_EMPTY_DEF = True
RGB_ENABLE_PAREN_INTEGER = True
RGB_ENABLE_PAREN_UNKNOWN = True
RGB_ENABLE_SIMPLE_DEF = True
RGB_ENABLE_POSSIBLE_C_CODE = True
RGB_ENABLE_WARNINGS = True
RGB_ENABLE_NESTED_MACRO_CALL = True
class PoundDefineCompiler:
    """
    Parses a Pound Define from C code and generates a Red/System version of it.

    Attributes:
        line(str): the line (or lines) of C code to parse.
        result(str): the resulting Red/System code from parsing the C code.
    """
    def __init__(self, line):
        """
        Constructor.

        Args:
            line(str): the line to parse.
        """
        self.line = line
        # Populated by parse(); presumably a pyparsing ParseResults of the
        # tokens of the `#define` — TODO confirm against red_utils.PoundDefine.
        self.result = None

    def parse(self):
        """
        Split the line into its constituent parts for code generation.

        Best-effort: on a parse failure the exception and offending line are
        printed and self.result is left as None (generate() would then fail).
        """
        try:
            self.result = PoundDefine.parseString(self.line)
        except Exception as e:
            print(e)
            print(self.line)

    def generate(self, file):
        """
        Generate a Red/System version of the parsing result.

        Dispatches on the shape of the parsed token list: empty defines,
        single parenthesized values, simple `#define NAME value` lines,
        nested macro calls, and raw C code (emitted as a warning comment via
        c_code_warning).

        Args:
            file(file): the already-opened file to write the Red/System code.
        """
        res = list(self.result)
        # DONE {
        # Simplest possible case: `#define NAME` with no value at all.
        if len(res) == 2:
            if RGB_ENABLE_EMPTY_DEF:
                file.write(f'{res[0]} {res[1]} []\n')
        # A pound define with a single value in parentheses
        elif len(res) == 5 and res[2] == '(' and res[4] == ')':
            # If it's a number, it will be easy to fix
            try:
                num = Number.parseString(res[3])
                name = res[1]
                if RGB_ENABLE_PAREN_INTEGER:
                    # Integer
                    if num.Integer != '':
                        file.write(f'{res[0]} {name} {num[0]}\n')
                    # Hex Number
                    elif num.HexNumber != '':
                        file.write(f'{res[0]} {name} {fix_hex_num(num[0])}\n')
            # Try to strip the parentheses and write the definition to file.
            except:
                if RGB_ENABLE_PAREN_UNKNOWN:
                    print(
                        f'WARNING: {res[1]}\nhas a potentially-flawed'
                        'binding, check for accuracy.'
                    )
                    file.write(f'{res[0]} {res[1]} {res[3]}')
        # }
        # Potentially raw C code
        elif RGB_ENABLE_POSSIBLE_C_CODE and res[2] == '(':
            c_code_warning(
                file,
                f'{res[0]} {res[1]} [{" ".join(res[2:])}]',
                self.line
            )
        # Has to be #define PBZ 11
        elif RGB_ENABLE_SIMPLE_DEF and len(res) == 3:
            file.write(f'{res[0]} {res[1]} {res[2]}\n')
        else:
            # If it starts with a '(', it is most likely C code
            if RGB_ENABLE_POSSIBLE_C_CODE and '(' not in res and ')' not in res:
                c_code_warning(
                    file,
                    f'{res[0]} {res[1]} [{" ".join(res[2:])}]',
                    self.line
                )
            # Contains a nested macro call
            elif (
                RGB_ENABLE_NESTED_MACRO_CALL and
                #'__pragma' not in res and
                '(' in res and
                ')' in res and
                res[1] != '('
            ):
                # A pragma inside a macro body cannot be translated; warn the
                # user loudly but still attempt the binding below.
                if '__pragma' in res:
                    print('-' * 80)
                    print(
                        ' WARNING! This line contains a pragma, please review.'
                        '\nHeader:\n'
                    )
                    print('', self.line)
                    print('-' * 80 + '\n\n')
                name = res[1]
                # Solve recursive pound defines by initially defining the name
                # as an empty block
                if res.count(name) > 1:
                    file.write(
                        f'\n; The following {name} definition refers to'
                        ' itself so Red/System needs this empty definition\n'
                    )
                    file.write(f'#define {name} []\n')
                out = res[3:]
                # NOTE(Pebaz) Simple check for sanity
                # NOTE(review): on failure this emits a warning but still
                # falls through and writes the macro below — confirm the
                # double emission is intended.
                if out[-1] != ')' or out[0] != '(':
                    #raise Exception(f'Paren `)` error. {out}\n  {res}')
                    c_code_warning(
                        file,
                        f'{res[0]} {res[1]} [{" ".join(res[2:])}]',
                        self.line
                    )
                # Surround individual arguments with parentheses for Red/System
                for i in range(len(out)):
                    sym = out[i]
                    if sym not in '(),':
                        out[i] = f'({sym})'
                # Remove all the commas from the macro
                while ',' in out:
                    out.remove(',')
                # NOTE(review): the insert shifts indices, so the res[3] used
                # in the write below is the token that was res[2] before the
                # insert (the first body token ahead of the '(' captured in
                # `out`); the appended ']' is never read — verify intent.
                res.insert(2, '[')
                res.append(']')
                file.write(f'{res[0]} {res[1]} [ {res[3]} ')
                file.write(' '.join(out))
                file.write(']\n')
            # Can only be C code from here on out because it contains a pragma
            else:
                # raise Exception(
                #     'Should never get here!\n'
                #     f'Offending line: {self.line}'
                # )
                c_code_warning(
                    file,
                    f'UNSUPPORTED SYNTAX: CREATE AN ISSUE ON GITHUB.\n{res[0]} {res[1]} [{" ".join(res[2:])}]',
                    self.line
                )
|
Pebaz/RGB
|
src/compilers/struct.py
|
<gh_stars>10-100
"""
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyparsing import * # Best parsing library for Python
from pycparser import CParser # For parsing C a little easier
from red_utils import * # Tools for parsing C/Red/System code
from parse_utils import * # Tools for parsing in general
RGB_ENABLE_STDOUT = False
class StructCompiler:
    """
    Parses a Struct from C code and generates a Red/System version of it.

    Attributes:
        line(str): the line (or lines) of C code to parse.
        result(str): the resulting Red/System code from parsing the C code.
    """
    def __init__(self, line):
        """
        Constructor.

        Args:
            line(str): the line to parse.
        """
        self.line = line
        self.result = None

    def parse(self):
        """
        Translate the C struct source line-by-line into Red/System,
        accumulating the generated text into self.result.

        Handles opening/closing braces (possibly nested structs), inline
        struct values, pointers-to-struct, and plain declarations (including
        multiple declarations on one line, e.g. `int *x, y, z;`).
        """
        self.result = ''
        w = ' ' * 40  # column padding for the side-by-side debug output
        # List of structure names and the current index level to access them
        struct_name = ['']
        struct_nest = 0
        # The result of each line
        res = None
        for line in self.line.split('\n'):
            if line.strip() == '': continue
            line = line.replace('\r', '')
            tab = ' ' * struct_nest
            if '{' in line:
                # Opening brace of the struct started on the previous line
                tab = ' ' * (struct_nest - 1)
                res = tab + '['
                if RGB_ENABLE_STDOUT:
                    print(line + w[len(line):], res)
            # Individual Declarations
            elif ';' in line and '}' not in line:
                # Inline struct value (e.g. `struct Point p;`)
                if 'struct' in line and '*' not in line:
                    vals = line.split(' ')
                    nme = vals[-1].replace(';', '')
                    strct = vals[-2]
                    res = tab + f'{nme} [{strct} value]'
                    if RGB_ENABLE_STDOUT:
                        print(line + w[len(line):], res)
                    continue
                # Pointer to struct (e.g. `struct Point *p;`)
                elif 'struct' in line and line.count('*') == 1:
                    vals = line.split(' ')
                    nme = vals[-1].replace('*', '').replace(';', '')
                    strct = vals[-2]
                    res = tab + f'{nme} [{strct}]'
                    if RGB_ENABLE_STDOUT:
                        print(line + w[len(line):], res)
                    continue
                # Anything past here is a normal declaration (remove struct).
                # BUG FIX: str.replace returns a new string (strings are
                # immutable), so the result must be re-assigned or the
                # 'struct' keyword is never actually stripped.
                line = line.replace('struct', '')
                # Handle Multiple Declarations on one line
                the_decl = list(Decl.parseString(line.strip()))
                end_of_type = None  # Index where the type ends in list
                # Get the index of where the type ends
                if '*' in the_decl:
                    end_of_type = (len(the_decl) - the_decl[::-1].index('*'))
                # The first item _is_ the type
                else:
                    end_of_type = 1
                # ['int', '*',  <-- 'x', ',', 'y', ',', 'z']
                the_type = the_decl[:end_of_type]
                values = split_list(the_decl[end_of_type:], ',')
                # Since the `argument` function needs only one type + id,
                # add the type to each and every var and handle them separately
                values = [the_type + i for i in values]
                for value in values:
                    had_to_make_affordances_for_red_system_again = False
                    # Handle pointers
                    if '*' in value:
                        # Only cap it to a single pointer if the decl has > 1
                        if value.count('*') > 1:
                            value = 'int', '*', value[-1]
                            had_to_make_affordances_for_red_system_again = True
                    res = tab + argument(value)
                    if had_to_make_affordances_for_red_system_again:
                        # Keep the original C as a comment for manual review
                        res += f' ; {line.strip()}'
                    if RGB_ENABLE_STDOUT:
                        print(line + w[len(line):], res)
                    # Manually add this res so that each variable can be added
                    self.result += res + '\n'
                # Since we manually added each var, we don't need to add the
                # last one again
                res = ''
            elif '}' in line:
                tab = ' ' * (struct_nest - 1)
                # Parse (and thereby validate) the struct terminator; the
                # parsed name itself is not needed for code generation.
                ender = StructEnd.parseString(line.strip())[0]
                if struct_nest > 1:
                    # Closing a nested struct: end the inner value block too
                    res = tab + '] value]'
                    if RGB_ENABLE_STDOUT:
                        print(line + w[len(line):], res)
                else:
                    res = tab + ']'
                    if RGB_ENABLE_STDOUT:
                        print(line + w[len(line):], res)
                struct_nest -= 1
                struct_name.pop()
            else:
                # A `struct <name>` header line: push a new nesting level
                struct_nest += 1
                struct_name.append(StructStart.parseString(line.strip())[0])
                pre = line + w[len(line):]
                if struct_nest > 1:
                    res = tab + f'{struct_name[struct_nest]} [struct!'
                    if RGB_ENABLE_STDOUT:
                        print(pre, res)
                else:
                    res = tab + f'{struct_name[struct_nest]}!: alias struct!'
                    if RGB_ENABLE_STDOUT:
                        print(pre, res)
            # After each iteration, add the result to self.result
            self.result += res + '\n'

    def generate(self, file):
        """
        Generate a Red/System version of the parsing result.

        Args:
            file(file): the already-opened file to write the Red/System code.
        """
        file.write('; Please check for accuracy:\n')
        file.write(f'{self.result}\n\n\n')
|
vlevorato/covid-ml-models
|
covid_ml/ml/feature_engineering.py
|
<gh_stars>1-10
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from dsbox.ml.feature_engineering.timeseries import RollingWindower, Shifter
def preprocess_location(dataframe, location='France'):
    """Return only the rows whose 'location' column equals *location*."""
    mask = dataframe['location'] == location
    return dataframe.loc[mask]
def preprocess_tests(dataframe):
    """Keep the all-ages rows (cl_age90 == 0) and rename 'jour' to 'date'.

    Fix: the original assigned columns on a boolean-filtered *view* of the
    input frame, which triggers pandas' chained-assignment warning and can
    mutate the caller's dataframe. Work on an explicit copy instead.
    """
    dataframe = dataframe[dataframe['cl_age90'] == 0].copy()
    # Expose the French 'jour' column under the canonical 'date' name
    # (appended last, then the original column dropped, as before).
    dataframe['date'] = dataframe['jour']
    del dataframe['jour']
    return dataframe
def preprocess_kpis(dataframe, date_col):
    """Replace textual 'NA' markers with NaN and cast every non-date column to float."""
    cleaned = dataframe.replace('NA', np.nan)
    value_columns = [c for c in cleaned.columns if c != date_col]
    for column in value_columns:
        cleaned[column] = cleaned[column].astype('float')
    return cleaned
def prepare_data(dataframe, data_file=None, date_col='date'):
    """Apply the per-source preprocessing, resample to a daily grid and
    interpolate interior gaps.

    Args:
        dataframe: raw input frame.
        data_file: source identifier selecting the preprocessing step.
        date_col: name of the column holding the observation date.
    """
    print('DF shape: {}'.format(dataframe.shape))
    # Source-specific cleanup.
    if data_file == 'owid_data':
        dataframe = preprocess_location(dataframe)
    elif data_file == 'datagov_tests_data':
        dataframe = preprocess_tests(dataframe)
    elif data_file == 'datagov_kpis_data':
        dataframe = preprocess_kpis(dataframe, date_col)
    # Normalise to one row per calendar day, averaging duplicates, then fill
    # interior holes by linear interpolation (edges are left as NaN).
    dataframe['date'] = pd.to_datetime(dataframe[date_col])
    daily = dataframe.resample('D', on='date').mean().reset_index(drop=False)
    return daily.interpolate(limit_area='inside')
def merge_data(dataframe_list, merge_col='date'):
    """Left-join every frame in *dataframe_list* onto the first one, on *merge_col*."""
    merged, *others = dataframe_list
    for other in others:
        merged = merged.merge(other, on=merge_col, how='left')
    return merged
def create_features(dataframe, date_col='date', predict_period_days=15, predict_period_week_round=False,
                    cols_to_shift=None, agg_ops=None, rolling_windows=None, shift_rolling_windows=None):
    """Build the modeling feature set: blended source series, derived daily
    case counts, future placeholder rows to predict, a weekday feature, and
    shifted rolling-window aggregates (plus diffs of the 7-day rolling mean).

    Args:
        dataframe: merged daily frame (output of merge_data/prepare_data).
        date_col: name of the date column.
        predict_period_days: number of future days to append for prediction.
        predict_period_week_round: if True, extend the horizon to the next Sunday.
        cols_to_shift: columns to derive rolling/shifted features from.
        agg_ops: aggregation operation names (e.g. 'mean', 'std').
        rolling_windows: rolling window sizes in days.
        shift_rolling_windows: lags (in days) applied to the rolling features.

    Returns:
        The input frame extended with the engineered feature columns.
    """
    dataframe = dataframe.sort_values(date_col)
    # Blend the OWID series with the data.gouv.fr ones by averaging them
    # row-wise, keeping the raw OWID values under *_source.
    # NOTE(review): 'T' and 'R' are assumed to be the datagov daily tests and
    # reproduction-rate columns — confirm against the merged inputs.
    dataframe['new_tests_source'] = dataframe['new_tests']
    dataframe['new_tests'] = dataframe[['new_tests', 'T']].mean(axis=1)
    dataframe['reproduction_rate_source'] = dataframe['reproduction_rate']
    dataframe['reproduction_rate'] = dataframe[['reproduction_rate', 'R']].mean(axis=1)
    # Recompute daily new cases from the cumulative total; negative/zero
    # day-over-day differences (reporting corrections) are dropped and
    # re-filled by interpolation.
    dataframe['total_cas_confirmes_1'] = dataframe['total_cas_confirmes'].shift(1)
    dataframe['new_cases_2'] = dataframe['total_cas_confirmes'] - dataframe['total_cas_confirmes_1']
    dataframe['new_cases_2'] = dataframe['new_cases_2'].map(lambda x: np.nan if x <= 0 else x)
    dataframe['new_cases_2'] = dataframe['new_cases_2'].interpolate()
    # Positivity proxy: today's cases over yesterday's test count.
    dataframe['prop_cases_vs_tests'] = dataframe['new_cases_2'] / dataframe['new_tests'].shift(1)
    dataframe['new_patients_gueris'] = dataframe['total_patients_gueris'] - \
                                       dataframe['total_patients_gueris'].shift(1)
    # Append empty rows for the dates to be predicted, starting today.
    now_date = datetime.now().date()
    dates_to_predict = []
    for day_shift in range(0, predict_period_days):
        dates_to_predict.append(now_date + timedelta(days=day_shift))
    if predict_period_week_round:
        # Extend to the next Sunday (weekday() == 6) past the horizon.
        while (now_date + timedelta(days=day_shift)).weekday() != 6:
            day_shift += 1
            dates_to_predict.append(now_date + timedelta(days=day_shift))
    df_to_predict = pd.DataFrame({date_col: dates_to_predict})
    df_to_predict[date_col] = pd.to_datetime(df_to_predict[date_col])
    dataframe = pd.concat([dataframe, df_to_predict], sort=False).reset_index(drop=True)
    """
    Misc features
    """
    dataframe['weekday'] = dataframe['date'].map(lambda d: pd.to_datetime(d).weekday())
    """
    Rolling windows shifted and diff features
    """
    # One RollingWindower per aggregation op; the resulting frames are joined
    # column-wise (dsbox names columns '<op>_<window>_<col>').
    df_roll = None
    for op in agg_ops:
        rolling_windower = RollingWindower(operation=op, windows=rolling_windows)
        if df_roll is None:
            df_roll = rolling_windower.fit_transform(dataframe[cols_to_shift])
        else:
            df_roll = df_roll.join(rolling_windower.fit_transform(dataframe[cols_to_shift]))
    # Lag every rolling feature by each shift (dsbox appends '_<shift>').
    shifter = Shifter(shifts=shift_rolling_windows)
    df_roll_shift = shifter.fit_transform(df_roll)
    # Differences between consecutive lags of the 7-day rolling mean.
    for col in cols_to_shift:
        for i in range(1, len(shift_rolling_windows)):
            df_roll_shift['diff_mean_7_{}_{}_{}'.format(col, shift_rolling_windows[i - 1], shift_rolling_windows[i])] = \
                df_roll_shift['mean_7_{}_{}'.format(col, shift_rolling_windows[i - 1])] - df_roll_shift[
                    'mean_7_{}_{}'.format(col, shift_rolling_windows[i])]
    dataframe = dataframe.join(df_roll_shift)
    return dataframe
|
vlevorato/covid-ml-models
|
tests/test_workflow_covidml_datascience.py
|
from dsbox.utils import execute_dag
from workflow import workflow_covidml_datascience

# Smoke test: execute every task of the data-science DAG locally, in
# dependency order, printing progress as it goes.
execute_dag(workflow_covidml_datascience.dag, verbose=True)
|
vlevorato/covid-ml-models
|
covid_ml/config/env_vars.py
|
<reponame>vlevorato/covid-ml-models
from airflow.models import Variable
import os
"""
Using dictionary for config variables to be able to change the way to assign values.
(by default, using environment variables).
"""
# Resolved configuration values, keyed by variable name.
config_variables = {}

env_list = [
    'COVIDML_PROJECT_PATH',
    'COVIDML_DATA_PATH',
    'COVIDML_MODEL_PATH',
    'COVIDML_GCP_KEY_PATH',
    'COVIDML_BQ_DATASET',
    'COVIDML_BQ_CONN_ID'
]

for name in env_list:
    value = os.environ.get(name)
    if value is None:
        # Fall back to an Airflow Variable when the env var is not set.
        value = Variable.get(name)
    config_variables[name] = value
|
vlevorato/covid-ml-models
|
covid_ml/ml/ml_metadata.py
|
# Base time series from which rolling/shifted features are derived.
cols_to_shift = ['prop_cases_vs_tests',
                 'new_cases_2',
                 'new_tests',
                 'nouveaux_patients_reanimation',
                 'nouveaux_patients_hospitalises',
                 'new_patients_gueris',
                 'reproduction_rate',
                 'new_deaths']

# French display labels for aggregation operations.
ref_ops = {'mean': 'moyenne',
           'median': 'médiane',
           'std': 'écart-type',
           'min': 'minimum',
           'max': 'maximum'}

# French display labels for the base columns.
ref_cols = {'new_cases_2': 'Nouveaux cas',
            'nouveaux_patients_hospitalises': 'Nouveaux patients hospitalisés',
            'nouveaux_patients_reanimation': 'Nouveaux patients en réanimation',
            'new_deaths': 'Nouveaux décès',
            'prop_cases_vs_tests': 'Proportion de cas en fonction des tests',
            'new_tests': 'Nouveaux tests',
            'new_patients_gueris': 'Nouveaux patients guéris',
            'reproduction_rate': 'Taux de reproduction (Rt)'
            }

# Feature name -> human-readable (French) description, filled below.
ref_features = {}

agg_ops = ['mean', 'median', 'std', 'min', 'max']
rolling_windows = [3, 7, 14, 28]
shift_rolling_windows = [14, 21, 28]

cols_to_keep = []
for col in cols_to_shift:
    label = ref_cols[col]
    # Deltas between consecutive lags of the 7-day rolling mean.
    for earlier, later in zip(shift_rolling_windows, shift_rolling_windows[1:]):
        feature = f'diff_mean_7_{col}_{earlier}_{later}'
        cols_to_keep.append(feature)
        ref_features[feature] = '{} - écart entre la moyenne sur 7j (-{}j) et (-{}j)'.format(label,
                                                                                            earlier,
                                                                                            later)
    # All (operation, window, lag) combinations of rolling aggregates.
    for agg_op in agg_ops:
        for rolling_window in rolling_windows:
            for shift_rolling_window in shift_rolling_windows:
                feature = f'{agg_op}_{rolling_window}_{col}_{shift_rolling_window}'
                cols_to_keep.append(feature)
                ref_features[feature] = '{} - {} sur {}j (-{}j)'.format(label, ref_ops[agg_op], rolling_window,
                                                                       shift_rolling_window)

model_types = ['gbt', 'rf', 'et']  # , 'bridge', 'elastic_net' , 'knn']
targets = ['new_cases_2', 'nouveaux_patients_hospitalises', 'nouveaux_patients_reanimation', 'new_deaths']
target_feature_selection_method_dict = {'new_cases_2': 'permutation_importance',
                                        'nouveaux_patients_hospitalises': 'permutation_importance',
                                        'nouveaux_patients_reanimation': 'permutation_importance',
                                        'new_deaths': 'permutation_importance'}

# French display labels for model types.
ref_models = {'rf': 'Random Forest',
              'gbt': 'Gradient Tree Boosting',
              'elastic_net': 'Elastic Net',
              'bridge': 'Bayesian Ridge',
              'knn': 'K-Nearest Neighbors',
              'et': 'Extremely Randomized Trees'
              }
|
vlevorato/covid-ml-models
|
tests/test_feature_engineering.py
|
<reponame>vlevorato/covid-ml-models<filename>tests/test_feature_engineering.py
import unittest
import pandas as pd
from pandas.util.testing import assert_frame_equal
from covid_ml.ml.feature_engineering import prepare_data
class TestFE(unittest.TestCase):
    """Unit tests for covid_ml.ml.feature_engineering."""

    def test_prepare_data(self):
        """prepare_data should resample to daily frequency and interpolate interior gaps."""
        # given
        df = pd.DataFrame({'date': ['2020-01-01', '2020-01-02', '2020-01-04', '2020-01-06'],
                           'value': [9, 10, 12, 10]})
        # when
        df_prepared = prepare_data(df)
        # then
        df_expected = pd.DataFrame(
            {'date': ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05', '2020-01-06'],
             'value': [9., 10., 11., 12., 11., 10.]})
        df_expected['date'] = pd.to_datetime(df_expected['date'])
        # Fix: pandas.util.testing was removed in pandas 2.0; use the public
        # pandas.testing API instead of the deprecated top-level import.
        pd.testing.assert_frame_equal(df_expected, df_prepared)
|
vlevorato/covid-ml-models
|
covid_ml/ml/model.py
|
import os
from datetime import datetime
import pandas as pd
import numpy as np
from dsbox.ml.metrics import root_mean_squared_error
from dsbox.operators.data_unit import DataInputUnit
from dsbox.utils import write_object_file, load_object_file
from dsbox.ml.feature_selection.greedy import greedy_feature_selection
from eli5.sklearn import PermutationImportance
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.linear_model import BayesianRidge, ElasticNet
from sklearn.metrics import make_scorer
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import MinMaxScaler
def create_model(model_type='elastic_net'):
    """Instantiate a fresh, unfitted scikit-learn regressor for *model_type*.

    Args:
        model_type: one of 'rf', 'et', 'gbt', 'bridge', 'elastic_net', 'knn'.

    Returns:
        An unfitted estimator with the project's tuned hyper-parameters.

    Raises:
        ValueError: if *model_type* is not a known identifier (the original
        silently returned None, which only surfaced later as an opaque
        AttributeError at fit time).
    """
    if model_type == 'rf':
        return RandomForestRegressor(n_estimators=300,
                                     min_samples_leaf=2,
                                     max_depth=15,
                                     max_features=0.8,
                                     max_samples=0.8,
                                     n_jobs=1)
    if model_type == 'et':
        return ExtraTreesRegressor(n_estimators=300, min_samples_leaf=2, max_depth=7, max_features=0.8, n_jobs=1)
    if model_type == 'gbt':
        return GradientBoostingRegressor(n_estimators=500,
                                         learning_rate=0.01,
                                         random_state=42)
    if model_type == 'bridge':
        return BayesianRidge(normalize=True)
    if model_type == 'elastic_net':
        return ElasticNet(normalize=True, max_iter=100000, l1_ratio=0.95)
    if model_type == 'knn':
        return KNeighborsRegressor(n_neighbors=120, p=3, leaf_size=120, n_jobs=1)
    raise ValueError('Unknown model_type: {!r}'.format(model_type))
def feature_contribution(model, features, model_type='elastic_net'):
    """Return a DataFrame (feature, importance) for a fitted model.

    Linear models expose coefficients, tree ensembles expose impurity-based
    importances; any other model type yields NaN importances.
    """
    if model_type in ('elastic_net', 'bridge'):
        importance = model.coef_
    elif model_type in ('rf', 'gbt', 'et'):
        importance = model.feature_importances_
    else:
        importance = [np.nan] * len(features)
    return pd.DataFrame({'feature': features, 'importance': importance})
def extract_feature_contribution(df_features, model_type_data_unit=None, model_path=None, target=None):
    """Load the persisted model for (model_type, target) and return its
    per-feature contributions as a DataFrame."""
    model_type = model_type_data_unit.read_data()['model_type'].values[0]
    print("Model type: {}".format(model_type))
    model_file = model_path + generate_model_filename(model_type, target)
    model = load_object_file(model_file)
    return feature_contribution(model, df_features['features'], model_type=model_type)
def generate_model_filename(model_type, target):
    """Build the '<model_type>_<target>.model' artifact file name."""
    return f'{model_type}_{target}.model'
def check_features(features, col_name='features'):
    """Normalise *features* to a plain list.

    A DataInputUnit is read and its *col_name* column extracted; anything
    else is returned unchanged.
    """
    if not isinstance(features, DataInputUnit):
        return features
    return list(features.read_data()[col_name].values)
def prepare_data(dataframe, features=None):
    """Min-max scale the *features* columns to [0, 1].

    Note: writes the scaled columns back into the passed frame (mutates the
    caller's dataframe) and returns it.
    """
    dataframe[features] = MinMaxScaler().fit_transform(dataframe[features])
    return dataframe
def train(dataframe, date_col='date', model_type_data_unit=None, model_path=None, target=None, features=None,
          split_date=None):
    """Fit the configured model on historical rows and persist it to disk.

    Args:
        dataframe: feature frame (output of the feature-engineering step).
        date_col: name of the date column.
        model_type_data_unit: data unit whose frame holds the chosen 'model_type'.
        model_path: directory prefix where the model file is written.
        target: name of the column to predict.
        features: feature column names, or a DataInputUnit holding them.
        split_date: if given, train only on rows strictly before this date.
    """
    df_model_type = model_type_data_unit.read_data()
    model_type = df_model_type['model_type'].values[0]
    print("Model type: {}".format(model_type))
    features = check_features(features)
    # Min-max scale features (prepare_data mutates and returns the frame).
    dataframe = prepare_data(dataframe, features=features)
    # Keep only fully-observed rows with a strictly positive target.
    X = dataframe.dropna(subset=features + [target])
    X = X[X[target] > 0].reset_index(drop=True)
    if split_date is not None:
        X = X[X[date_col] < split_date]
    print("Train dataset min: {}".format(X[date_col].min()))
    print("Train dataset max: {}".format(X[date_col].max()))
    model = create_model(model_type=model_type)
    model.fit(X[features], X[target])
    # Persist as '<model_path><model_type>_<target>.model'.
    model_path += generate_model_filename(model_type, target)
    write_object_file(model_path, model)
def predict(dataframe, date_col='date', model_type_data_unit=None, model_path=None, target=None, features=None,
            y_pred_col='y_pred', split_date=None):
    """Load the persisted model and predict the target for future rows.

    When *split_date* is None, predicts on rows whose target is missing from
    today onward; otherwise on all rows at/after *split_date*. Negative
    predictions are clipped to zero.

    Returns:
        DataFrame with the date column and the *y_pred_col* predictions.
    """
    df_model_type = model_type_data_unit.read_data()
    model_type = df_model_type['model_type'].values[0]
    print("Model type: {}".format(model_type))
    features = check_features(features)
    dataframe = prepare_data(dataframe, features=features)
    # Fill feature gaps forward, then backward, so future rows are populated.
    # NOTE(review): fillna(method=...) is deprecated in pandas 2.x (use
    # ffill()/bfill()) — confirm the pinned pandas version.
    dataframe[features] = dataframe[features].fillna(method='ffill') \
        .fillna(method='bfill')
    X_to_predict = dataframe
    if split_date is None:
        # Default: predict only the rows that have no observed target yet,
        # starting from today's date.
        X_to_predict = X_to_predict[pd.isnull(X_to_predict[target])]
        split_date = str(datetime.now().date())
    X_to_predict = X_to_predict[X_to_predict[date_col] >= split_date]
    print("Predict dataset min: {}".format(X_to_predict[date_col].min()))
    print("Predict dataset max: {}".format(X_to_predict[date_col].max()))
    model = load_object_file(model_path + generate_model_filename(model_type, target))
    y_pred = model.predict(X_to_predict[features])
    # NOTE(review): assigning onto a filtered slice may raise pandas'
    # SettingWithCopyWarning — consider an explicit .copy() above.
    X_to_predict[y_pred_col] = y_pred
    # Clip negative predictions: none of the targets can be below zero.
    X_to_predict[y_pred_col] = X_to_predict[y_pred_col].map(lambda x: 0 if x < 0 else x)
    return X_to_predict[[date_col, y_pred_col]]
def model_selection(dataframe, model_list, date_col='date', split_date=None, max_date=None, target=None, features=None,
                    score_func=root_mean_squared_error, cum_sum=False):
    """Train each candidate model type on a time split and keep the best.

    Rows before *split_date* are used for training, rows at/after (optionally
    capped by *max_date*) for testing. Lower *score_func* is better. When
    *cum_sum* is True the score compares only the cumulative totals at the
    end of the test period instead of the per-day values.

    Returns:
        One-row DataFrame with columns 'target', 'model_type', 'score'.
    """
    dataframe = prepare_data(dataframe, features=features)
    X = dataframe.dropna(subset=features + [target])
    X_train = X[X[date_col] < split_date]
    X_test = X[X[date_col] >= split_date]
    if max_date is not None:
        X_test = X_test[X_test[date_col] < max_date]
    print("Train dataset min: {}".format(X_train[date_col].min()))
    print("Train dataset max: {}".format(X_train[date_col].max()))
    print("Test dataset min: {}".format(X_test[date_col].min()))
    print("Test dataset max: {}".format(X_test[date_col].max()))
    best_score = None
    best_model_type = None
    for model_type in model_list:
        print("Testing: {}".format(model_type))
        model = create_model(model_type)
        model.fit(X_train[features], X_train[target])
        y_test = model.predict(X_test[features])
        if cum_sum:
            # Compare only the end-of-period cumulative totals.
            score = score_func([X_test[target].cumsum().values[-1]], [np.cumsum(y_test)[-1]])
        else:
            score = score_func(X_test[target], y_test)
        print("Score: {}".format(score))
        # Lower score wins (error metric).
        if best_score is None or score < best_score:
            best_score = score
            best_model_type = model_type
    print("Best model: {}".format(best_model_type))
    df_best_model_type = pd.DataFrame({'target': [target], 'model_type': [best_model_type], 'score': [best_score]})
    return df_best_model_type
def permutation_importance_select_features(cols_to_test, model, df, target, score_func=root_mean_squared_error):
    """Rank *cols_to_test* by permutation importance of the fitted *model*
    and return, ordered by decreasing importance, the ones whose importance
    is non-negative."""
    perm = PermutationImportance(model, scoring=make_scorer(score_func), n_iter=3)
    perm = perm.fit(df[cols_to_test], df[target])
    ranking = pd.DataFrame({'feature': cols_to_test,
                            'importance': perm.feature_importances_})
    ranking = ranking.sort_values('importance', ascending=False)
    kept = ranking[ranking['importance'] >= 0]
    return list(kept['feature'].values)
def feature_selection(dataframe, date_col='date', split_date=None, max_date=None, model_type_data_unit=None,
                      method='greedy', score_func=root_mean_squared_error, target=None, features=None):
    """Select a feature subset for *target* using the requested *method*.

    Supported methods: 'greedy' (dsbox greedy search), 'permutation_importance',
    'filter_zero_coeff' (drop features with non-positive contribution), and
    'no_selection'. The two importance-based methods are only kept if they
    actually improve the test score; otherwise the full feature list is used.

    Returns:
        One-column DataFrame ('features') listing the selected columns.
    """
    df_model_type = model_type_data_unit.read_data()
    model_type = df_model_type['model_type'].values[0]
    print("Model type: {}".format(model_type))
    dataframe = prepare_data(dataframe, features=features)
    X = dataframe.dropna(subset=features + [target])
    X_train = X[X[date_col] < split_date]
    X_test = X[X[date_col] >= split_date]
    if max_date is not None:
        X_test = X_test[X_test[date_col] < max_date]
    print("Train dataset min: {}".format(X_train[date_col].min()))
    print("Train dataset max: {}".format(X_train[date_col].max()))
    print("Test dataset min: {}".format(X_test[date_col].min()))
    print("Test dataset max: {}".format(X_test[date_col].max()))
    # Default: keep everything (also the fallback for unknown methods).
    cols_selected = features
    model = create_model(model_type)
    if method == 'greedy':
        cols_selected = greedy_feature_selection(X_train, X_test, X_train[target], X_test[target], model,
                                                 features, score_func)
    if method == 'permutation_importance' or method == 'filter_zero_coeff':
        # Baseline score with the full feature set.
        model.fit(X_train[features], X_train[target])
        score = score_func(X_test[target], model.predict(X_test[features]))
        print("Original score: {}".format(score))
        if method == 'permutation_importance':
            cols_selected = permutation_importance_select_features(features, model, X_test, target)
        if method == 'filter_zero_coeff':
            df_features_contrib = feature_contribution(model, features, model_type=model_type)
            # All-NaN contributions mean the model type exposes none; keep all.
            if len(df_features_contrib.dropna()) == 0:
                cols_selected = features
            else:
                cols_selected = list(df_features_contrib[df_features_contrib['importance'] > 0]['feature'])
        # Refit on the candidate subset; only keep it if the score improves.
        model.fit(X_train[cols_selected], X_train[target])
        new_score = score_func(X_test[target], model.predict(X_test[cols_selected]))
        print("New score: {}".format(new_score))
        if new_score >= score:
            print("No optim found :(")
            cols_selected = features
    if method == 'no_selection':
        cols_selected = features
    print('Features selected: {}'.format(len(cols_selected)))
    df_features = pd.DataFrame(cols_selected)
    df_features.columns = ['features']
    return df_features
def check_if_new_features_gives_better_model(data_unit, date_col='date', model_type_data_unit=None, target=None,
                                             current_features=None, candidates_features=None, split_date=None,
                                             score_func=root_mean_squared_error, task_id_update=None,
                                             task_id_skip=None):
    """Branch decision for the Airflow DAG: compare the current feature set
    against a candidate set on a validation split.

    Returns the *task_id_update* branch id when the candidate features score
    strictly better (lower error), or when no current feature file exists yet;
    returns *task_id_skip* otherwise.
    """
    # Bootstrap: with no stored feature list, always take the update branch.
    if not os.path.isfile(current_features.input_path):
        print("No features present.")
        return task_id_update
    df_model_type = model_type_data_unit.read_data()
    model_type = df_model_type['model_type'].values[0]
    print("Model type: {}".format(model_type))
    dataframe = data_unit.read_data()
    current_features = check_features(current_features)
    candidates_features = check_features(candidates_features)
    dataframe = prepare_data(dataframe, features=current_features + candidates_features)
    X = dataframe.dropna(subset=[target])
    X_train = X[X[date_col] < split_date]
    X_validation = X[X[date_col] >= split_date]
    print("Train dataset min: {}".format(X_train[date_col].min()))
    print("Train dataset max: {}".format(X_train[date_col].max()))
    print("Validation dataset min: {}".format(X_validation[date_col].min()))
    print("Validation dataset max: {}".format(X_validation[date_col].max()))
    # Score the currently-deployed feature set (rows with NaNs in the
    # respective feature subset are dropped independently for each side).
    current_model = create_model(model_type=model_type)
    current_model.fit(X_train[current_features].dropna(), X_train.dropna(subset=current_features)[target])
    y_pred_current = current_model.predict(X_validation[current_features].dropna())
    current_score = score_func(X_validation.dropna(subset=current_features)[target], y_pred_current)
    print("Current score: {}".format(current_score))
    # Score the candidate feature set the same way.
    new_model = create_model(model_type=model_type)
    new_model.fit(X_train[candidates_features].dropna(), X_train.dropna(subset=candidates_features)[target])
    y_pred_new = new_model.predict(X_validation[candidates_features].dropna())
    new_score = score_func(X_validation.dropna(subset=candidates_features)[target], y_pred_new)
    print("New score: {}".format(new_score))
    if new_score < current_score:
        print("Better model found!")
        return task_id_update
    else:
        print("No better model found...")
        return task_id_skip
|
vlevorato/covid-ml-models
|
covid_ml/utils/bq_generation.py
|
<filename>covid_ml/utils/bq_generation.py
import pandas as pd
def generate_data_viz_query(template_query, joining_field='date',
                            bq_dataset=None, targets=None):
    """Render the data-viz BigQuery query from *template_query*.

    Builds one CTE per target (daily average of y_pred, cast to INT64) plus
    the FULL OUTER JOIN clauses, then substitutes dataset, CTEs and joins
    into the template's three positional placeholders.
    """
    cte_template = (
        " predictions_data_{0} AS "
        "( SELECT "
        "{1}, "
        "CAST(AVG(y_pred) AS INT64) as {0}_pred "
        "FROM `{2}.predictions_last` as predictions_last "
        "WHERE target = '{0}' "
        "GROUP BY predictions_last.{1} )"
    )
    ctes = [cte_template.format(target, joining_field, bq_dataset) for target in targets]
    pre_query = 'WITH' + ','.join(ctes)

    join_lines = []
    for target in targets:
        join_lines.append('FULL OUTER JOIN predictions_data_{}\n'.format(target))
        join_lines.append('USING ({})\n'.format(joining_field))
    join_query = ''.join(join_lines)

    return template_query.format(bq_dataset, pre_query, join_query)
def generate_data_viz_raw_query(template_query, joining_field='date',
                                bq_dataset=None, targets=None):
    """Render the raw (per-export, un-aggregated) data-viz BigQuery query.

    Same shape as generate_data_viz_query, but each CTE keeps the export
    date and model columns instead of averaging predictions.
    """
    cte_template = (
        " predictions_data_{0} AS "
        "( SELECT "
        "{1}, "
        "date_export as date_export_{0}, "
        "CAST(y_pred AS INT64) as {0}_pred, "
        "model as model_{0} "
        "FROM `{2}.predictions` as predictions "
        "WHERE target = '{0}')"
    )
    ctes = [cte_template.format(target, joining_field, bq_dataset) for target in targets]
    pre_query = 'WITH' + ','.join(ctes)

    join_lines = []
    for target in targets:
        join_lines.append('FULL OUTER JOIN predictions_data_{}\n'.format(target))
        join_lines.append('USING ({})\n'.format(joining_field))
    join_query = ''.join(join_lines)

    return template_query.format(bq_dataset, pre_query, join_query)
def generate_referential(ref_dict):
    """Turn a {variable: label} mapping into a two-column referential DataFrame."""
    names = list(ref_dict.keys())
    labels = [ref_dict[name] for name in names]
    return pd.DataFrame({'variable': names, 'libelle': labels})
|
vlevorato/covid-ml-models
|
workflow/workflow_covidml_datascience.py
|
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from airflow.utils.task_group import TaskGroup
from dsbox.operators.data_operator import DataOperator
from dsbox.operators.data_unit import DataInputFileUnit, DataOutputFileUnit, DataInputMultiFileUnit
from covid_ml.config.commons import dag_args, data_paths
from covid_ml.config.env_vars import config_variables
from covid_ml.ml.feature_engineering import prepare_data, merge_data, create_features
from covid_ml.ml.ml_metadata import cols_to_shift, agg_ops, rolling_windows, shift_rolling_windows, cols_to_keep, \
target_feature_selection_method_dict, targets, model_types
from covid_ml.ml.model import train, predict, feature_selection, check_if_new_features_gives_better_model, \
extract_feature_contribution, model_selection
# Daily pipeline: model selection -> feature selection -> train -> predict.
dag = DAG(dag_id='covidml_data_science',
          default_args=dag_args,
          description='Data Science workflow for train-predict Covid insights',
          schedule_interval='5 0 * * *',  # every day at 00:05 am
          catchup=False)
# Date splits are evaluated at DAG parse time (datetime.now()), so they
# slide forward automatically: last 40 days reserved for the feature
# selection test split, last 15 days for validation.
split_date_feature_selection_test = datetime.now() - timedelta(days=40)
split_date_feature_selection_validation = datetime.now() - timedelta(days=15)
# None: no cut-off applied for train/predict (presumably full history
# is used -- confirm against covid_ml.ml.model.train/predict).
split_date_for_train_predict = None
"""
Data prep
"""
# Each raw CSV is cleaned independently (prepare_data) and re-written as
# parquet under intermediate_data_path.
data_files_to_prepare = ['owid_data', 'datagov_data', 'datagov_tests_data', 'datagov_kpis_data']
task_group_prepare_data = TaskGroup("Prepare_data", dag=dag)
for data_file in data_files_to_prepare:
    input_data_file_unit = DataInputFileUnit(data_paths['raw_data_path'] + data_file + '.csv')
    output_data_file_unit = DataOutputFileUnit(data_paths['intermediate_data_path'] + data_file + '.parquet',
                                               pandas_write_function_name='to_parquet')
    date_col = 'date'
    # The KPI extract names its date column differently from the other files.
    if data_file == 'datagov_kpis_data':
        date_col = 'extract_date'
    task_prepare_data = DataOperator(operation_function=prepare_data,
                                     params={'data_file': data_file,
                                             'date_col': date_col},
                                     input_unit=input_data_file_unit,
                                     output_unit=output_data_file_unit,
                                     task_id='Prepare_{}'.format(data_file),
                                     task_group=task_group_prepare_data,
                                     dag=dag)
# Combine the four prepared datasets into one table (see merge_data for
# the join logic).
input_data_multi_files_unit = DataInputMultiFileUnit(
    [data_paths['intermediate_data_path'] + data_file + '.parquet' for data_file in data_files_to_prepare],
    pandas_read_function_name='read_parquet')
output_merge_unit = DataOutputFileUnit(data_paths['intermediate_data_path'] + 'X_merged.parquet',
                                       pandas_write_function_name='to_parquet')
task_merge_data = DataOperator(operation_function=merge_data,
                               input_unit=input_data_multi_files_unit,
                               output_unit=output_merge_unit,
                               task_id='Merge_data',
                               dag=dag)
task_group_prepare_data.set_downstream(task_merge_data)
input_data_merged_unit = DataInputFileUnit(data_paths['intermediate_data_path'] + 'X_merged.parquet',
                                           pandas_read_function_name='read_parquet')
output_features_unit = DataOutputFileUnit(data_paths['intermediate_data_path'] + 'X_features.parquet',
                                          pandas_write_function_name='to_parquet')
"""
Feature Engineering
"""
# create_features applies the shift / rolling-aggregate configuration from
# ml_metadata over a 15-day prediction horizon (rounded to whole weeks).
task_fe = DataOperator(operation_function=create_features,
                       params={'cols_to_shift': cols_to_shift,
                               'agg_ops': agg_ops,
                               'rolling_windows': rolling_windows,
                               'shift_rolling_windows': shift_rolling_windows,
                               'predict_period_days': 15,
                               'predict_period_week_round': True},
                       input_unit=input_data_merged_unit,
                       output_unit=output_features_unit,
                       task_id='Feature_engineering',
                       dag=dag)
task_merge_data.set_downstream(task_fe)
input_data_final_unit = DataInputFileUnit(data_paths['intermediate_data_path'] + 'X_features.parquet',
                                          pandas_read_function_name='read_parquet')
"""
Model Selection
"""
# For each target, evaluate the candidate model types on the validation
# split and persist the winning type as a CSV (read back downstream by the
# feature-selection, train and predict tasks).
task_group_model_selection = TaskGroup("Model_selection", dag=dag)
for target in targets:
    output_model_selection_unit = DataOutputFileUnit(config_variables['COVIDML_MODEL_PATH']
                                                     + 'model_type_{}.csv'.format(target),
                                                     pandas_write_function_name='to_csv', index=False)
    task_model_selection = DataOperator(operation_function=model_selection,
                                        params={'split_date': split_date_feature_selection_validation,
                                                'model_list': model_types,
                                                'target': target,
                                                'features': cols_to_keep},
                                        input_unit=input_data_final_unit,
                                        output_unit=output_model_selection_unit,
                                        task_group=task_group_model_selection,
                                        task_id='Model_selection_{}'.format(target),
                                        dag=dag)
task_fe.set_downstream(task_group_model_selection)
"""
Feature Selection
"""
task_group_feature_selection = TaskGroup("Feature_selection", dag=dag)
for target in targets:
output_features_selection_unit = DataOutputFileUnit(data_paths['features_candidates_path']
+ 'features_{}.csv'.format(target),
pandas_write_function_name='to_csv', index=False)
input_model_selection_unit = DataInputFileUnit(config_variables['COVIDML_MODEL_PATH']
+ 'model_type_{}.csv'.format(target))
task_feature_selection = DataOperator(operation_function=feature_selection,
params={'split_date': split_date_feature_selection_test,
'max_date': split_date_feature_selection_validation,
'model_type_data_unit': input_model_selection_unit,
'target': target,
'features': cols_to_keep,
'method': target_feature_selection_method_dict[target]},
input_unit=input_data_final_unit,
output_unit=output_features_selection_unit,
task_group=task_group_feature_selection,
task_id='Feature_selection_{}'.format(target),
dag=dag)
task_group_model_selection.set_downstream(task_group_feature_selection)
"""
Train model if none or better one is found
"""
task_train_models = TaskGroup("Train", dag=dag)
task_dummy_start_train = DummyOperator(task_id='Start_train',
task_group=task_train_models,
dag=dag)
for target in targets:
input_model_selection_unit = DataInputFileUnit(config_variables['COVIDML_MODEL_PATH']
+ 'model_type_{}.csv'.format(target))
input_features_selection_unit = DataInputFileUnit(data_paths['features_path']
+ 'features_{}.csv'.format(target))
input_candidates_features_selection_unit = DataInputFileUnit(data_paths['features_candidates_path']
+ 'features_{}.csv'.format(target))
task_check_if_retrain_needed = BranchPythonOperator(python_callable=check_if_new_features_gives_better_model,
op_kwargs={'data_unit': input_data_final_unit,
'model_type_data_unit': input_model_selection_unit,
'target': target,
'current_features': input_features_selection_unit,
'candidates_features': input_candidates_features_selection_unit,
'split_date': split_date_feature_selection_validation,
'task_id_update': '{}.Update_features_{}'.format(
task_train_models.group_id,
target),
'task_id_skip': '{}.Skip_features_update_{}'.format(
task_train_models.group_id,
target)
},
task_id='Check_features_{}'.format(target),
task_group=task_train_models,
dag=dag
)
task_dummy_start_train.set_downstream(task_check_if_retrain_needed)
task_dummy_skip_update_features = DummyOperator(task_id='Skip_features_update_{}'.format(target),
task_group=task_train_models,
dag=dag)
task_copy_new_features = BashOperator(bash_command='cp {} {}'.format(data_paths['features_candidates_path']
+ 'features_{}.csv'.format(target),
data_paths['features_path']),
task_id='Update_features_{}'.format(target),
task_group=task_train_models,
dag=dag)
task_check_if_retrain_needed.set_downstream(task_copy_new_features)
task_check_if_retrain_needed.set_downstream(task_dummy_skip_update_features)
task_train = DataOperator(operation_function=train,
params={'model_type_data_unit': input_model_selection_unit,
'model_path': config_variables['COVIDML_MODEL_PATH'],
'target': target,
'features': input_features_selection_unit,
'split_date': split_date_for_train_predict},
input_unit=input_data_final_unit,
task_group=task_train_models,
trigger_rule='none_failed',
task_id='Train_model_{}'.format(target),
dag=dag)
task_copy_new_features.set_downstream(task_train)
task_dummy_skip_update_features.set_downstream(task_train)
output_features_contrib_unit = DataOutputFileUnit(data_paths['features_path']
+ 'features_contrib_{}.parquet'.format(target),
pandas_write_function_name='to_parquet')
task_extract_feature_contrib = DataOperator(operation_function=extract_feature_contribution,
params={'model_type_data_unit': input_model_selection_unit,
'model_path': config_variables['COVIDML_MODEL_PATH'],
'target': target},
input_unit=input_features_selection_unit,
output_unit=output_features_contrib_unit,
task_group=task_train_models,
task_id='Extract_feature_contribution_{}'.format(target),
dag=dag
)
task_train.set_downstream(task_extract_feature_contrib)
task_group_feature_selection.set_downstream(task_train_models)
"""
Predict
"""
task_predict_models = TaskGroup("Predict", dag=dag)
task_dummy_start_predict = DummyOperator(task_id='Start_predictions',
task_group=task_predict_models,
dag=dag)
for target in targets:
input_model_selection_unit = DataInputFileUnit(config_variables['COVIDML_MODEL_PATH']
+ 'model_type_{}.csv'.format(target))
input_features_selection_unit = DataInputFileUnit(data_paths['features_path']
+ 'features_{}.csv'.format(target))
output_predictions_unit = DataOutputFileUnit(data_paths['intermediate_data_path'] +
'X_predict_{}.parquet'.format(target),
pandas_write_function_name='to_parquet')
task_predict = DataOperator(operation_function=predict,
params={'model_type_data_unit': input_model_selection_unit,
'model_path': config_variables['COVIDML_MODEL_PATH'],
'target': target,
'features': input_features_selection_unit,
'split_date': split_date_for_train_predict},
input_unit=input_data_final_unit,
output_unit=output_predictions_unit,
task_group=task_predict_models,
task_id='Predict_model_{}'.format(target),
dag=dag)
task_dummy_start_predict.set_downstream(task_predict)
task_train_models.set_downstream(task_predict_models)
task_launch_export_predictions_dag = TriggerDagRunOperator(task_id='Trigger_export_predictions_dag',
trigger_dag_id='covidml_export_data_to_bq',
dag=dag)
task_predict_models.set_downstream(task_launch_export_predictions_dag)
|
vlevorato/covid-ml-models
|
covid_ml/utils/io.py
|
<filename>covid_ml/utils/io.py
from datetime import datetime
import pandas as pd
def dummy_function(dataframe):
    """Identity transform used by passthrough import/export tasks."""
    return dataframe
def export_data(dataframe, model_type_data_unit=None, target=None):
    """Annotate a predictions frame with model type, target and export time.

    Reads the persisted model-type CSV through *model_type_data_unit*
    (first row of its 'model_type' column) and adds three columns:
    'model', 'target' and a datetime 'date_export'.
    """
    model_type = model_type_data_unit.read_data()['model_type'].values[0]
    print("Model type: {}".format(model_type))
    dataframe['model'] = model_type
    dataframe['target'] = target
    # Timestamp assignment yields a datetime64 column directly.
    dataframe['date_export'] = pd.to_datetime(datetime.now())
    return dataframe
def get_bq_query(query_name, file_path):
    """Return the text of the SQL file ``<file_path>sql/<query_name>.sql``.

    Parameters
    ----------
    query_name : str
        Base name of the .sql file (without extension).
    file_path : str
        Project root path; expected to end with a path separator, since it
        is concatenated directly with 'sql/'.
    """
    bq_file = '{}sql/{}.sql'.format(file_path, query_name)
    # Context manager guarantees the handle is closed even if read() raises
    # (the original left the file open until garbage collection).
    with open(bq_file, "r") as f:
        return f.read()
|
vlevorato/covid-ml-models
|
workflow/workflow_covidml_export_data_to_bq.py
|
from airflow import DAG
from airflow.providers.google.cloud.operators.bigquery import BigQueryInsertJobOperator
from airflow.utils.task_group import TaskGroup
from dsbox.operators.bq_unit import DataOutputBigQueryUnit
from dsbox.operators.data_operator import DataOperator
from dsbox.operators.data_unit import DataInputFileUnit
from covid_ml.config.commons import dag_args, data_paths
from covid_ml.config.env_vars import config_variables
from covid_ml.ml.ml_metadata import ref_features, ref_models, ref_cols, targets
from covid_ml.utils.bq_generation import generate_data_viz_query, generate_referential
from covid_ml.utils.io import dummy_function, get_bq_query, export_data
# Triggered by the data-science DAG (schedule_interval=None: no own schedule).
dag = DAG(dag_id='covidml_export_data_to_bq',
          default_args=dag_args,
          description='Workflow exporting prediction data to BQ',
          schedule_interval=None,
          catchup=False)
path_json_key = config_variables['COVIDML_GCP_KEY_PATH']
bq_dataset = config_variables['COVIDML_BQ_DATASET']
# The full feature history is pushed unchanged (dummy_function is identity).
input_histo_data_unit = DataInputFileUnit(data_paths['intermediate_data_path'] +
                                          'X_features.parquet',
                                          pandas_read_function_name='read_parquet')
output_histo_bq_unit = DataOutputBigQueryUnit(table_id='{}.historical_data'.format(bq_dataset),
                                              path_json_key=path_json_key)
task_export_historical_data = DataOperator(operation_function=dummy_function,
                                           input_unit=input_histo_data_unit,
                                           output_unit=output_histo_bq_unit,
                                           task_id='Export_historical_data',
                                           dag=dag)
task_group_export_predictions = TaskGroup("Export_predictions", dag=dag)
for target in targets:
    input_model_selection_unit = DataInputFileUnit(config_variables['COVIDML_MODEL_PATH']
                                                   + 'model_type_{}.csv'.format(target))
    input_predictions_unit = DataInputFileUnit(data_paths['intermediate_data_path'] +
                                               'X_predict_{}.parquet'.format(target),
                                               pandas_read_function_name='read_parquet')
    # drop_table=False: the target table is kept across runs (presumably
    # rows are appended -- confirm in DataOutputBigQueryUnit).
    output_pred_bq_unit = DataOutputBigQueryUnit(table_id='{}.predictions'.format(bq_dataset),
                                                 path_json_key=path_json_key,
                                                 drop_table=False)
    task_export_predictions_data = DataOperator(operation_function=export_data,
                                                params={'model_type_data_unit': input_model_selection_unit,
                                                        'target': target},
                                                input_unit=input_predictions_unit,
                                                output_unit=output_pred_bq_unit,
                                                task_group=task_group_export_predictions,
                                                task_id='Export_predictions_{}'.format(target),
                                                dag=dag)
    input_features_contrib_unit = DataInputFileUnit(data_paths['features_path']
                                                    + 'features_contrib_{}.parquet'.format(target),
                                                    pandas_read_function_name='read_parquet')
    output_features_contrib_bq_unit = DataOutputBigQueryUnit(table_id='{}.feature_contribution'.format(bq_dataset),
                                                             path_json_key=path_json_key,
                                                             drop_table=False)
    task_export_features_contribution_data = DataOperator(operation_function=export_data,
                                                          params={'model_type_data_unit': input_model_selection_unit,
                                                                  'target': target},
                                                          input_unit=input_features_contrib_unit,
                                                          output_unit=output_features_contrib_bq_unit,
                                                          task_group=task_group_export_predictions,
                                                          task_id='Export_features_contribution_{}'.format(target),
                                                          dag=dag)
    task_export_predictions_data.set_downstream(task_export_features_contribution_data)
task_export_historical_data.set_downstream(task_group_export_predictions)
# Reporting tables are (re)built from SQL templates stored under sql/ in the
# project path; the templates are filled with the dataset name (and, for the
# main viz table, with generated per-target CTE/join clauses).
data_viz_table_query = generate_data_viz_query(get_bq_query('create_data_viz_table_template',
                                                            config_variables['COVIDML_PROJECT_PATH']),
                                               bq_dataset=config_variables['COVIDML_BQ_DATASET'],
                                               targets=targets)
data_viz_raw_table_query = get_bq_query('create_data_viz_raw_table',
                                        config_variables['COVIDML_PROJECT_PATH']).format(
    config_variables['COVIDML_BQ_DATASET'])
feature_viz_table_query = get_bq_query('create_feature_viz_table',
                                       config_variables['COVIDML_PROJECT_PATH']).format(
    config_variables['COVIDML_BQ_DATASET'])
task_generate_data_viz_table = BigQueryInsertJobOperator(gcp_conn_id=config_variables['COVIDML_BQ_CONN_ID'],
                                                         configuration={"query": {"query": data_viz_table_query,
                                                                                  "useLegacySql": "False", }},
                                                         task_id='Generate_data_viz_table',
                                                         dag=dag)
task_generate_data_viz_raw_table = BigQueryInsertJobOperator(gcp_conn_id=config_variables['COVIDML_BQ_CONN_ID'],
                                                             configuration={"query": {"query": data_viz_raw_table_query,
                                                                                      "useLegacySql": "False", }},
                                                             task_id='Generate_data_viz_raw_table',
                                                             dag=dag)
task_generate_feature_viz_table = BigQueryInsertJobOperator(gcp_conn_id=config_variables['COVIDML_BQ_CONN_ID'],
                                                            configuration={"query": {"query": feature_viz_table_query,
                                                                                     "useLegacySql": "False", }},
                                                            task_id='Generate_feature_viz_table',
                                                            dag=dag)
task_group_export_predictions.set_downstream(task_generate_data_viz_table)
task_group_export_predictions.set_downstream(task_generate_data_viz_raw_table)
task_group_export_predictions.set_downstream(task_generate_feature_viz_table)
# Referential (lookup) tables are small and fully rebuilt on every run
# (drop_table=True).
output_features_ref_bq_unit = DataOutputBigQueryUnit(table_id='{}.ref_features'.format(bq_dataset),
                                                     path_json_key=path_json_key,
                                                     drop_table=True)
output_models_ref_bq_unit = DataOutputBigQueryUnit(table_id='{}.ref_models'.format(bq_dataset),
                                                   path_json_key=path_json_key,
                                                   drop_table=True)
output_cols_ref_bq_unit = DataOutputBigQueryUnit(table_id='{}.ref_cols'.format(bq_dataset),
                                                 path_json_key=path_json_key,
                                                 drop_table=True)
task_generate_features_referential = DataOperator(operation_function=generate_referential,
                                                  params={'ref_dict': ref_features},
                                                  output_unit=output_features_ref_bq_unit,
                                                  task_id='Generate_features_referential',
                                                  dag=dag)
task_generate_models_referential = DataOperator(operation_function=generate_referential,
                                                params={'ref_dict': ref_models},
                                                output_unit=output_models_ref_bq_unit,
                                                task_id='Generate_models_referential',
                                                dag=dag)
task_generate_cols_referential = DataOperator(operation_function=generate_referential,
                                              params={'ref_dict': ref_cols},
                                              output_unit=output_cols_ref_bq_unit,
                                              task_id='Generate_cols_referential',
                                              dag=dag)
# The feature viz table joins against all three referentials, so it waits
# for each of them.
task_generate_features_referential.set_downstream(task_generate_feature_viz_table)
task_generate_models_referential.set_downstream(task_generate_feature_viz_table)
task_generate_cols_referential.set_downstream(task_generate_feature_viz_table)
|
vlevorato/covid-ml-models
|
workflow/workflow_covidml_source_data.py
|
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from dsbox.operators.data_operator import DataOperator
from dsbox.operators.data_unit import DataInputFileUnit, DataOutputFileUnit
from covid_ml.config.commons import dag_args, data_paths
from covid_ml.utils.io import dummy_function
# Nightly copy of the four public CSV sources into the local raw_data folder.
dag = DAG(dag_id='covidml_source_data_import',
          default_args=dag_args,
          description='Data source import',
          schedule_interval='0 0 * * *', # every day at 00:00 am
          catchup=False)
task_start_import = DummyOperator(task_id='Start_source_data_import',
                                  dag=dag)
# Every import task is a plain read-then-write passthrough (dummy_function).
input_owid_data_unit = DataInputFileUnit(data_paths['source_data_owid'])
output_owid_data_unit = DataOutputFileUnit(data_paths['raw_data_path'] + 'owid_data.csv', index=False)
task_owid_import = DataOperator(operation_function=dummy_function,
                                input_unit=input_owid_data_unit,
                                output_unit=output_owid_data_unit,
                                task_id='Import_OWID_data',
                                dag=dag)
input_datagov_data_unit = DataInputFileUnit(data_paths['source_data_gov'])
output_datagov_data_unit = DataOutputFileUnit(data_paths['raw_data_path'] + 'datagov_data.csv', index=False)
task_datagov_import = DataOperator(operation_function=dummy_function,
                                   input_unit=input_datagov_data_unit,
                                   output_unit=output_datagov_data_unit,
                                   task_id='Import_DataGov_data',
                                   dag=dag)
# The virological tests dataset is semicolon-separated, hence the explicit sep.
input_datagov_tests_data_unit = DataInputFileUnit(data_paths['source_data_gov_tests'], sep=';')
output_datagov_tests_data_unit = DataOutputFileUnit(data_paths['raw_data_path'] + 'datagov_tests_data.csv', index=False)
task_datagov_tests_import = DataOperator(operation_function=dummy_function,
                                         input_unit=input_datagov_tests_data_unit,
                                         output_unit=output_datagov_tests_data_unit,
                                         task_id='Import_DataGovTests_data',
                                         dag=dag)
input_datagov_kpis_data_unit = DataInputFileUnit(data_paths['source_data_gov_kpis'])
output_datagov_kpis_data_unit = DataOutputFileUnit(data_paths['raw_data_path'] + 'datagov_kpis_data.csv', index=False)
task_datagov_kpis_import = DataOperator(operation_function=dummy_function,
                                        input_unit=input_datagov_kpis_data_unit,
                                        output_unit=output_datagov_kpis_data_unit,
                                        task_id='Import_DataGovKpis_data',
                                        dag=dag)
# The four imports fan out from the start marker and run independently.
task_start_import.set_downstream(task_owid_import)
task_start_import.set_downstream(task_datagov_import)
task_start_import.set_downstream(task_datagov_tests_import)
task_start_import.set_downstream(task_datagov_kpis_import)
|
vlevorato/covid-ml-models
|
covid_ml/config/commons.py
|
from datetime import datetime, timedelta
from covid_ml.config.env_vars import config_variables
dag_start_date = datetime(2020, 12, 1)
# airflow common dag args
dag_args = {
    'start_date': dag_start_date,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    'max_active_runs': 1
}
# Local paths are rooted at COVIDML_DATA_PATH; the source_data_* entries are
# remote CSV endpoints read directly by the import DAG.
data_paths = {
    'raw_data_path': config_variables['COVIDML_DATA_PATH'] + 'raw_data/',
    'source_data_owid': "https://github.com/owid/covid-19-data/raw/master/public/data/owid-covid-data.csv",
    'source_data_gov': "https://www.data.gouv.fr/en/datasets/r/d3a98a30-893f-47f7-96c5-2f4bcaaa0d71",
    'source_data_gov_tests': "https://www.data.gouv.fr/fr/datasets/r/dd0de5d9-b5a5-4503-930a-7b08dc0adc7c",
    'source_data_gov_kpis': "https://www.data.gouv.fr/fr/datasets/r/381a9472-ce83-407d-9a64-1b8c23af83df",
    'intermediate_data_path': config_variables['COVIDML_DATA_PATH'] + 'intermediate_data/',
    'features_path': config_variables['COVIDML_DATA_PATH'] + 'features/',
    'features_candidates_path': config_variables['COVIDML_DATA_PATH'] + 'features_candidates/'
}
"""
Web pages sources:
https://ourworldindata.org/coronavirus-data
https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-resultats-des-tests-virologiques-covid-19/
https://www.data.gouv.fr/fr/datasets/donnees-relatives-a-lepidemie-de-covid-19-en-france-vue-densemble/
https://www.data.gouv.fr/fr/datasets/indicateurs-de-suivi-de-lepidemie-de-covid-19/
"""
|
Sophia-yu-kk/go-btfs
|
bin/find_dependlib.py
|
#!/usr/bin/env python
"""Script to find go library dependency"""
import os
import re
import subprocess
# start libs
start_libs = (
"go-ipfs-config",
"interface-go-ipfs-core",
"go-path",
"go-libp2p",
"go-multiaddr"
)
def helper(d, pkg, path):
    # Depth-first walk of the dependency graph rooted at `pkg`, printing one
    # full chain per leaf. `path` accumulates the chain as a display string.
    # NOTE(review): assumes the graph is acyclic -- a dependency cycle would
    # recurse forever.
    if pkg not in d:
        # Leaf: pkg has no recorded dependents -- print the finished chain.
        path += " <=== " + pkg
        print path
        return
    if not path:
        path += pkg
    else:
        path += " <=== " + pkg
    for v in d.get(pkg):
        helper(d, v, path)
def print_dep(d):
    # Print every dependency chain starting from each seed library.
    print "dependency graph\n"
    for key in start_libs:
        helper(d, key, "")
def run():
    # Breadth-first scan of $GOPATH/pkg sources with grep, building
    # dependency_map[lib] = list of packages whose .go files import `lib`.
    pkgpath = os.environ["GOPATH"] + "/pkg"
    # dependency dictionary
    dependency_map = {start_lib: [] for start_lib in start_libs}
    libs = list(start_libs)
    # NOTE(review): visited_libs aliases `libs` here; this works only because
    # `libs` is re-bound (not mutated in place) at the end of each round.
    visited_libs = libs
    # continue running when libs is not empty
    while libs:
        next_libs = []
        for lib in libs:
            print lib
            out = subprocess.check_output(
                ["grep", "-nr", lib, pkgpath],
                stderr=subprocess.STDOUT)
            if out:
                lines = out.split("\n")
                for line in lines:
                    # example as below:
                    # ./mod/github.com/libp2p/go-libp2p-kad-dht@v0.0.10/pb/message.go:9: ma "github.com/multiformats/go-multiaddr"
                    ss = line.split(":")
                    if not ss or len(ss) < 2:
                        continue
                    # get filename, check if it is .go file
                    s = ss[0]
                    if not s.endswith(".go"):
                        continue
                    # check it is not go-multiaddr-***, should be either "/go-multiaddr" or "/go-multiaddr/"
                    orig = ss[-1]
                    pos = orig.find(lib)
                    if pos != -1:
                        next_pos = pos+len(lib)
                        if next_pos == len(orig):
                            continue
                        elif orig[next_pos] != "\"" and \
                                orig[next_pos] != "/":
                            # not end with " or /
                            continue
                    else:
                        continue
                    # get lib name, add to nexlibs
                    pos = s.find("@v")
                    if pos != -1:
                        sub = s[:pos]
                        pkg = sub.split("/")[-1]
                        if pkg not in next_libs and pkg not in visited_libs:
                            next_libs.append(pkg)
                        if lib not in dependency_map:
                            dependency_map[lib] = []
                        dependency_map[lib].append(pkg)
        libs = next_libs
        visited_libs.extend(libs)
        for key, value in dependency_map.iteritems():
            print key, value
        print "****** round end ******\n"
    print "\ndependency dictionary:\n"
    all_change = set()
    for key, value in dependency_map.iteritems():
        all_change.add(key)
        for item in value:
            all_change.add(item)
    print "\nall", len(all_change), "libs\n"
    print all_change
    print_dep(dependency_map)
if __name__ == "__main__":
# check GOPATH
if 'GOPATH' not in os.environ:
print "GOPATH not set"
os._exit(1)
run()
|
postgres-ci/example-python-project
|
project_test.py
|
import unittest
from project import sum;
class ProjectTestCase(unittest.TestCase):
    """Checks project.sum against a table of (expected, a, b) cases."""

    def test_sum(self):
        cases = [
            (0, 0, 0),
            (2, 1, 1),
            (4, 2, 2),
        ]
        for expected, a, b in cases:
            self.assertEqual(expected, sum(a, b))
if __name__ == '__main__':
unittest.main()
|
maxwell-k/qbe
|
django_qbe/exports.py
|
# -*- coding: utf-8 -*-
import csv
from collections import OrderedDict, Callable
from io import StringIO, BytesIO
import six
from django.http import StreamingHttpResponse
__all__ = ("formats", )
class FormatsException(Exception):
    """Raised by Formats.add when the registered object is not callable."""
    pass
class Formats(OrderedDict):
    """Ordered registry mapping an export format name to its function.

    Usage: ``@formats.add("csv")`` above an export function registers it
    under that name.
    """

    def add(self, format):
        parent = self

        def decorator(func):
            if isinstance(func, Callable):
                parent.update({format: func})
            else:
                raise FormatsException("func is not a function.")
            # Bug fix: the original decorator returned None, which rebinds
            # the module-level name of every registered exporter to None.
            # Returning func keeps the decorated function usable directly.
            return func
        return decorator
formats = Formats()
# Taken from http://docs.python.org/library/csv.html#csv-examples
class UnicodeWriter(object):
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    Rows are staged in an in-memory queue and drained via get_values(),
    which always returns UTF-8 bytes.
    """

    def __init__(self, dialect=csv.excel_tab, encoding="utf-8", **kwds):
        # BytesIO on py2 (csv writes bytes there), StringIO on py3.
        self.queue = BytesIO() if six.PY2 else StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)

    def _encode(self, item):
        # Coerce any cell value to text (utf-8 bytes on py2, str on py3).
        if six.PY2:
            encoded = unicode(item).encode('utf-8')
        else:
            encoded = str(item)
        return encoded

    def writerow(self, row):
        self.writer.writerow([self._encode(s) for s in row])

    def get_values(self):
        """Return everything written since the last call, then reset the queue."""
        ret = self.queue.getvalue()
        # Bug fix: rewind before truncating. truncate(0) alone leaves the
        # stream position at the end, so every subsequent write pads the
        # buffer with NUL bytes -- the buffer grows on each flush and the
        # lstrip below only hides it. seek(0) makes the reset real.
        self.queue.seek(0)
        self.queue.truncate(0)
        if six.PY2:
            return ret.lstrip(b'\0')
        return ret.encode('utf-8').lstrip(b'\0')

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
def base_export(labels, results, dialect=csv.excel_tab):
    """Stream CSV output: header row first, then data flushed every 200 rows.

    Yields UTF-8 byte chunks suitable for a streaming HTTP response.
    """
    writer = UnicodeWriter(dialect=dialect)
    writer.writerow(labels)
    for index, row in enumerate(results, start=1):
        writer.writerow(row)
        if index % 200 == 0:
            yield writer.get_values()
    # Flush whatever remains after the last full batch (possibly empty).
    yield writer.get_values()
def make_attachment(response, ext):
    """Mark *response* as a downloadable file named ``export.<ext>``."""
    disposition = 'attachment; filename=export.{0}'.format(ext)
    response['Content-Disposition'] = disposition
    return response
@formats.add("csv")
def csv_format(labels, results):
content_type = "text/csv"
return make_attachment(StreamingHttpResponse(base_export(labels, results, dialect=csv.excel), content_type=content_type), "csv")
@formats.add("ods")
def ods_format(labels, results):
content_type = "application/vnd.oasis.opendocument.spreadsheet"
return make_attachment(StreamingHttpResponse(base_export(labels, results, dialect=csv.excel), content_type=content_type), "ods")
@formats.add("xls")
def xls_format(labels, results):
content_type = "application/vnd.ms-excel"
return make_attachment(StreamingHttpResponse(base_export(labels, results, dialect=csv.excel), content_type=content_type), "xls")
|
maxwell-k/qbe
|
django_qbe/savedqueries/models.py
|
<reponame>maxwell-k/qbe
from builtins import object
import pickle
from django.db import models
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
from picklefield.fields import PickledObjectField
class SavedQuery(models.Model):
    """A QBE query stored for later re-execution.

    The pickled query definition is keyed by a 32-character hash
    (presumably computed by the QBE views -- confirm there);
    ``date_updated`` is refreshed automatically on every save.
    """
    query_hash = models.CharField(_("hash"), max_length=32, primary_key=True,
                                  editable=False)
    name = models.CharField(_("name"), max_length=100)
    description = models.TextField(_("description"), blank=True)
    query_data = PickledObjectField(protocol=pickle.HIGHEST_PROTOCOL)
    date_created = models.DateTimeField(_("date created"), default=now,
                                        editable=False)
    date_updated = models.DateTimeField(_("date updated"), editable=False)

    class Meta(object):
        verbose_name = _("Saved query")
        verbose_name_plural = _("Saved queries")

    def __unicode__(self):
        return self.name

    def __str__(self):
        # Bug fix: Django on Python 3 ignores __unicode__, so saved queries
        # rendered as "SavedQuery object" in the admin without this.
        return self.name

    def save(self, *args, **kwargs):
        # Keep the modification timestamp current on every write.
        self.date_updated = now()
        super(SavedQuery, self).save(*args, **kwargs)
|
maxwell-k/qbe
|
django_qbe/settings.py
|
<reponame>maxwell-k/qbe<filename>django_qbe/settings.py
# -*- coding: utf-8 -*-
from django.conf import settings
# admin
QBE_ADMIN = getattr(settings, "QBE_ADMIN", "admin")
QBE_ADMIN_SITE = getattr(settings,
"QBE_ADMIN_SITE", "%s.admin_site" % QBE_ADMIN)
# auth
QBE_ACCESS_FOR = getattr(settings, "QBE_ACCESS_FOR", lambda u: u.is_staff)
# formats to export
QBE_FORMATS_EXPORT = getattr(settings, "QBE_FORMATS_EXPORT", "qbe_formats")
# custom operators
QBE_CUSTOM_OPERATORS = getattr(settings,
"QBE_CUSTOM_OPERATORS", "qbe_operators")
# query form
QBE_ALIASES = getattr(settings, "QBE_ALIASES", False)
QBE_GROUP_BY = getattr(settings, "QBE_GROUP_BY", False)
QBE_SHOW_ROW_NUMBER = getattr(settings, "QBE_SHOW_ROW_NUMBER", True)
# saved queries
QBE_SAVED_QUERIES = 'django_qbe.savedqueries' in settings.INSTALLED_APPS
|
maxwell-k/qbe
|
setup.py
|
import codecs
import re
from os import path
from setuptools import setup, find_packages
def read(*parts):
    """Return the text of a file addressed relative to this setup.py."""
    file_path = path.join(path.dirname(__file__), *parts)
    # Bug fix: use a context manager so the handle is closed deterministically
    # (the original leaked the open file until garbage collection).
    with codecs.open(file_path) as f:
        return f.read()


def find_version(*parts):
    """Extract the ``__version__ = '...'`` string from the given file.

    Raises RuntimeError when no version assignment is found.
    """
    version_file = read(*parts)
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Package metadata; the version is read from django_qbe/__init__.py so it is
# defined in exactly one place.
setup(
    name='django_qbe',
    version=find_version('django_qbe', '__init__.py'),
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://versae.github.com/qbe/',
    description='Django admin tool for custom reports',
    long_description=read('README.rst'),
    license='MIT',
    keywords='qbe django admin reports query sql',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: JavaScript',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    zip_safe=False,
    packages=find_packages(),
    include_package_data=True,
    install_requires=['future', 'django-picklefield'],
)
|
maxwell-k/qbe
|
django_qbe/urls.py
|
<reponame>maxwell-k/qbe
# -*- coding: utf-8 -*-
try:
    from django.conf.urls import url
except ImportError:
    # Backward compatibility for Django prior to 1.6
    from django.conf.urls.defaults import url
from django_qbe.exports import formats
from . import views
# The export URL only matches the extensions currently registered in
# `formats` (csv/ods/xls by default).
urlpatterns = [
    url(r'^$', views.qbe_form, name="qbe_form"),
    url(r'^qbe.js$', views.qbe_js, name="qbe_js"),
    url(r'^bookmark/$', views.qbe_bookmark, name="qbe_bookmark"),
    url(r'^proxy/$', views.qbe_proxy, name="qbe_proxy"),
    url(r'^auto/$', views.qbe_autocomplete, name="qbe_autocomplete"),
    url(r'^(?P<query_hash>(.*))/results\.(?P<format>(%s))$' % "|".join(formats.keys()), views.qbe_export, name="qbe_export"),
    url(r'^(?P<query_hash>(.*))/results/$', views.qbe_results, name="qbe_results"),
    url(r'^(?P<query_hash>(.*))/$', views.qbe_form, name="qbe_form"),
]
|
maxwell-k/qbe
|
django_qbe/savedqueries/apps.py
|
<reponame>maxwell-k/qbe
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class QBESavedQueriesConfig(AppConfig):
    """App config for the saved-queries add-on; sets its admin display name."""
    name = 'django_qbe.savedqueries'
    verbose_name = _("Query by Example")
|
maxwell-k/qbe
|
django_qbe/savedqueries/south_migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration creating the SavedQuery table."""

    def forwards(self, orm):
        # Adding model 'SavedQuery'
        db.create_table('savedqueries_savedquery', (
            ('query_hash', self.gf('django.db.models.fields.CharField')(max_length=32, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('query_data', self.gf('picklefield.fields.PickledObjectField')()),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('date_updated', self.gf('django.db.models.fields.DateTimeField')()),
        ))
        db.send_create_signal('savedqueries', ['SavedQuery'])

    def backwards(self, orm):
        # Deleting model 'SavedQuery'
        db.delete_table('savedqueries_savedquery')

    # Frozen ORM state used by South when running this migration.
    models = {
        'savedqueries.savedquery': {
            'Meta': {'object_name': 'SavedQuery'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'query_data': ('picklefield.fields.PickledObjectField', [], {}),
            'query_hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'})
        }
    }
    complete_apps = ['savedqueries']
|
maxwell-k/qbe
|
django_qbe/savedqueries/admin.py
|
<reponame>maxwell-k/qbe
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
try:
from django.contrib.admin.utils import unquote
except ImportError:
# Backward compatibility for Django prior to 1.7
from django.contrib.admin.util import unquote
try:
from django.conf.urls import url
except ImportError:
# Backward compatibility for Django prior to 1.6
from django.conf.urls.defaults import url
from django.shortcuts import redirect
try:
from functools import update_wrapper
except ImportError:
# Backward compatibility for Django prior to 1.6
from django.utils.functional import update_wrapper
from django_qbe.savedqueries.models import SavedQuery
from django_qbe.settings import QBE_ADMIN
from django_qbe.utils import admin_site
class SavedQueryAdmin(admin.ModelAdmin):
    """Admin for stored QBE queries, adding a per-object 'run' view."""
    list_display = ('name', 'description', 'date_created', 'query_hash',
                    'run_link')

    def run_link(self, obj):
        # List-column rendering "Run" (custom run view) and "Edit" (QBE form)
        # links for each saved query.
        info = (QBE_ADMIN,
                self.model._meta.app_label,
                self.model._meta.model_name or self.model._meta.module_name)
        return (u'<span class="nowrap"><a href="%s">%s</a>'
                u' | <a href="%s">%s</a></span>' %
                (reverse("%s:%s_%s_run" % info, args=(obj.pk,)), _("Run"),
                 reverse("qbe_form", kwargs={'query_hash': obj.pk}),
                 _("Edit")))
    run_link.short_description = _("query")
    run_link.allow_tags = True

    def get_urls(self):
        # Prepend a /<pk>/run/ route, wrapped with the admin permission check.
        def wrap(view):
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        info = (self.model._meta.app_label,
                self.model._meta.model_name or self.model._meta.module_name)
        urlpatterns = [
            url(r'^(.+)/run/$', wrap(self.run_view), name='%s_%s_run' % info),
        ]
        return urlpatterns + super(SavedQueryAdmin, self).get_urls()

    def save_model(self, request, obj, form, change):
        # The query payload lives in the session, keyed by the hash passed
        # in the GET querystring.
        query_hash = request.GET.get("hash", "")
        obj.query_hash = query_hash
        obj.query_data = request.session["qbe_query_%s" % query_hash]
        obj.save()

    def add_view(self, request, *args, **kwargs):
        # Only allow saving when the session actually holds the query data;
        # otherwise bounce back to the QBE form.
        query_hash = request.GET.get("hash", "")
        query_key = "qbe_query_%s" % query_hash
        if not query_key in request.session:
            return redirect("qbe_form")
        return super(SavedQueryAdmin, self).add_view(request, *args, **kwargs)

    def run_view(self, request, query_hash, extra_context=None):
        # Re-hydrate the session with the stored query, then redirect to its
        # results page.
        obj = self.get_object(request, unquote(query_hash))
        data = obj.query_data
        query_key = "qbe_query_%s" % query_hash
        if not query_key in request.session:
            request.session[query_key] = data
        return redirect("qbe_results", query_hash)
admin_site.register(SavedQuery, SavedQueryAdmin)
|
maxwell-k/qbe
|
django_qbe/savedqueries/__init__.py
|
<filename>django_qbe/savedqueries/__init__.py<gh_stars>10-100
# Pre-Django 3.2 hook telling Django which AppConfig to load for this app.
default_app_config = 'django_qbe.savedqueries.apps.QBESavedQueriesConfig'
|
maxwell-k/qbe
|
django_qbe/operators.py
|
from builtins import object
from django.conf import settings
from django.db import connections
from django.db.models.fields import Field
from importlib import import_module
from future.utils import with_metaclass
DATABASES = settings.DATABASES
BACKEND_TO_OPERATIONS = {
'mysql': 'MySQLOperations',
'oracle': 'OracleOperations',
'postgis': 'PostGISOperations',
'spatialite': 'SpatiaLiteOperations',
}
"""
Plugin infrastructure based on
http://martyalchin.com/2008/jan/10/simple-plugin-framework/
"""
class OperatorMount(type):
    """Metaclass that auto-registers operator plugin classes.

    Every class created with this metaclass (other than the mount point
    itself) that defines both ``slug`` and ``label`` is stored in the shared
    ``operators`` dict, keyed by its slug.
    """
    def __init__(cls, *args, **kwargs):
        if not hasattr(cls, 'operators'):
            # This branch only executes when processing the mount point itself.
            # So, since this is a new operator type, not an implementation,
            # this class shouldn't be registered as a operator. Instead, it
            # sets up a list where operators can be registered later.
            cls.operators = {}
        else:
            # This must be a operator implementation, which should be
            # registered.
            # Simply appending it to the list is all that's needed to keep
            # track of it later.
            if hasattr(cls, 'slug') and hasattr(cls, 'label'):
                cls.operators[cls.slug] = cls
    def get_operators(self):
        # Available on every operator class through the metaclass; returns
        # the shared slug -> class registry.
        return self.operators
class CustomOperator(with_metaclass(OperatorMount, object)):
    """
    Mount point for operators which refer to actions that can be performed.
    Operators implementing this reference should provide the following
    attributes:
    ======== ========================================================
    slug     A unique slug that must identify this operator
    label    The label that will be displayed in the criteria dropdown
    ======== ========================================================
    """
    def __init__(self, db_field, operator, value, db_alias="default"):
        """Bind the operator to a column, lookup name, value and db alias.

        Loads the backend's operator table and DatabaseOperations instance
        for the given alias so SQL fragments can be rendered later.
        """
        self.params = []
        self.wheres = []
        self.db_field = db_field
        self.operator = operator
        self.value = value
        self._db_alias = db_alias
        # BUG FIX: honour the requested alias; this was hard-coded to
        # "default", silently ignoring db_alias.
        self._db_connection = connections[db_alias]
        # BUG FIX: fall back to the default alias *settings dict*; the old
        # fallback was the string "default", which would crash on ['ENGINE'].
        database_properties = DATABASES.get(db_alias, DATABASES["default"])
        module = database_properties['ENGINE']
        # BUG FIX: pre-bind so both names exist even when the imports fail.
        base_mod = None
        intros_mod = None
        try:
            base_mod = import_module("%s.base" % module)
            intros_mod = import_module("%s.introspection" % module)
        except ImportError:
            pass
        if base_mod and intros_mod:
            self._db_operators = base_mod.DatabaseWrapper.operators
            if module.startswith('django.contrib.gis'):
                # GIS backends expose differently named operations classes.
                operations_name = BACKEND_TO_OPERATIONS[module.split('.')[-1]]
                DatabaseOperations = getattr(base_mod, operations_name)
            else:
                DatabaseOperations = base_mod.DatabaseOperations
            try:
                self._db_operations = DatabaseOperations(self._db_connection)
            except TypeError:
                # Some engines have no params to instance DatabaseOperations
                self._db_operations = DatabaseOperations()
    def _get_lookup(self, operator, over):
        """Run the generic field db-prep step for *operator* on *over*."""
        lookup = Field().get_db_prep_lookup(operator, over,
                                            connection=self._db_connection,
                                            prepared=True)
        if isinstance(lookup, (tuple, list)):
            return lookup[0]
        return lookup
    def get_params(self):
        """
        returns a list of db-prepared parameter values
        """
        value = self._get_lookup(self.operator, self.value)
        # BUG FIX: append the prepared value; previously the raw value was
        # appended and the lookup preparation was computed then discarded.
        self.params.append(value)
        return self.params
    def get_wheres(self):
        """
        returns a list of SQL where-clause fragments
        """
        # BUG FIX: ``lookup_cast`` and ``operator`` were undefined names here
        # (guaranteed NameError); use the backend DatabaseOperations and the
        # stored lookup name instead.
        cast = self._db_operations.lookup_cast(self.operator)
        self.wheres.append(u"%s %s" % (cast % self.db_field, self.operator))
        return self.wheres
|
jobscore/ansible-role-resque-exporter
|
molecule/default/tests/test_default.py
|
<gh_stars>0
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_services_running_and_enabled(host):
    """The resque exporter's service must be both enabled and running."""
    exporter = 'prometheus-resque-exporter'
    assert host.service(exporter).is_enabled
    assert host.service(exporter).is_running
def test_node_exporter_metrics(host):
    """The metrics endpoint must expose the resque_up gauge."""
    metrics = host.check_output('curl http://localhost:9447/metrics')
    assert 'resque_up' in metrics
|
Belyanova/Python_-training
|
model/configurations_user.py
|
<gh_stars>0
from sys import maxsize
class Configurations_user:
    """Value object holding every field of an address-book contact.

    All attributes default to None. Equality compares only id (when both
    sides have one), firstname and last_name, which is what the UI tests
    can observe.
    """
    def __init__(self, firstname=None, middlename=None, last_name=None, nickname=None, title=None, company=None, address=None,
                 phone_home=None, phone_mobile=None, phone_work=None, mail1=None, mail2=None, mail3=None, bd_day=None, bd_month=None,
                 bd_year=None, aday=None, amonth=None, ayear=None, address2=None, phone2=None, notes=None, id=None,
                 all_phones_from_home_page=None, user_name=None, all_mail=None):
        """Store every keyword argument as an attribute of the same name."""
        values = dict(locals())
        del values["self"]
        for attribute, value in values.items():
            setattr(self, attribute, value)

    def __repr__(self):
        return "{}:{}:{}".format(self.id, self.last_name, self.firstname)

    def __eq__(self, other):
        # A missing id on either side acts as a wildcard.
        ids_match = self.id is None or other.id is None or self.id == other.id
        return ids_match and self.firstname == other.firstname and self.last_name == other.last_name

    def id_or_max(self):
        """Sort key: the numeric id when present, otherwise the largest int."""
        return int(self.id) if self.id else maxsize
|
Belyanova/Python_-training
|
fixture/orm.py
|
<reponame>Belyanova/Python_-training
from datetime import datetime
from pony.orm import *
from model.configurations_group import Configurations_group
from model.configurations_user import Configurations_user
from pymysql.converters import decoders
class ORM_fixture:
    """Pony-ORM backed, read-only view of the addressbook MySQL database."""
    db = Database()
    class ORMGroup(db.Entity):
        # Maps the group_list table onto a pony entity.
        _table_ = 'group_list'
        id = PrimaryKey(int, column='group_id')
        name = Optional(str, column='group_name')
        header = Optional(str, column='group_header')
        footer = Optional(str, column='group_footer')
        # Many-to-many with users through the address_in_groups junction
        # table. NOTE(review): column="id" is the junction column used for
        # the user side — confirm against the address_in_groups schema.
        users = Set(lambda: ORM_fixture.ORMuser, table="address_in_groups",column="id", reverse="groups")#, lasy=True)
    class ORMuser(db.Entity):
        # Maps the addressbook table; a non-NULL ``deprecated`` timestamp
        # marks a soft-deleted row.
        _table_ = 'addressbook'
        id = PrimaryKey(int, column='id')
        firstname = Optional (str, column='firstname')
        lastname = Optional(str, column='lastname')
        deprecated = Optional(datetime, column='deprecated')
        groups = Set(lambda: ORM_fixture.ORMGroup, table="address_in_groups", column="group_id", reverse="users")#, lasy=True)
    def __init__(self,host, name, user, password):
        # Bind and map exactly once per fixture; sql_debug echoes generated SQL.
        self.db.bind('mysql', host=host, database=name, user=user, password=password) #, conv=decoders)
        self.db.generate_mapping()
        sql_debug(True)
    def convert_groups_to_model(self,groups):
        # Translate ORM entities into the plain model objects the tests use.
        def convert(group):
            return Configurations_group (id=str(group.id), name=group.name, header=group.header, footer=group.footer)
        return list(map(convert, groups))
    def convert_users_to_model(self,users):
        # Only id / last name / first name take part in test comparisons,
        # so only those fields are copied over.
        def convert(user):
            return Configurations_user(id=str(user.id), last_name=user.lastname, firstname=user.firstname)
        return list(map(convert, users))
    @db_session
    def get_group_list(self):
        """Return every group as a Configurations_group."""
        return self.convert_groups_to_model(select(g for g in ORM_fixture.ORMGroup))
    @db_session
    def get_user_list(self):
        """Return every non-deleted user as a Configurations_user."""
        return self.convert_users_to_model(select(c for c in ORM_fixture.ORMuser if c.deprecated is None))
    @db_session
    def get_users_in_group(self, group):
        """Return the users that belong to *group* (matched by id)."""
        orm_group = list(select(g for g in ORM_fixture.ORMGroup if g.id == group.id))[0]
        return self.convert_users_to_model(orm_group.users)
    @db_session
    def get_users_not_in_group(self, group):
        """Return the non-deleted users that are NOT members of *group*."""
        orm_group = list(select(g for g in ORM_fixture.ORMGroup if g.id == group.id))[0]
        return self.convert_users_to_model(
            select(c for c in ORM_fixture.ORMuser if c.deprecated is None and orm_group not in c.groups))
|
Belyanova/Python_-training
|
bdd/user/user_scenarios.py
|
from pytest_bdd import scenario
from .user_steps import *
# Scenario entry points only; the actual steps live in user_steps.py and are
# pulled in via the star import above.
@scenario('users.feature', 'Add new user')
def test_add_new_user():
    """Run the 'Add new user' scenario from users.feature."""
    pass
@scenario('users.feature', 'Delete a user')
def test_delete_user():
    """Run the 'Delete a user' scenario from users.feature."""
    pass
@scenario('users.feature', 'Edit a user')
def test_edit_user():
    """Run the 'Edit a user' scenario from users.feature."""
    pass
|
Belyanova/Python_-training
|
test/test_edit_group.py
|
# -*- coding: utf-8 -*-
from random import randrange
from model.configurations_group import Configurations_group
import random
import pytest
import allure
def test_case(app,db,check_ui,json_groups):
    """Edit a random existing group and check the list size is unchanged.

    NOTE(review): the ``check_ui`` fixture is accepted but never used —
    confirm whether the final UI comparison should be guarded by it.
    """
    group = json_groups
    with allure.step('Given a non-empty group list'):
        if len(db.get_group_list()) == 0:
            app.group.create(group)
        old_groups = db.get_group_list()
    with allure.step('Given a random group from the list'):
        group = random.choice(old_groups)
    with allure.step('When I edit the group from the list'):
        # Re-submits the group's own data through the edit form.
        app.group.edit_group_by_id(group.id,group)
    with allure.step('Then the new group list is equal to the old list without the edit group'):
        assert len(old_groups) == app.group.count()
        new_groups = db.get_group_list()
        assert sorted(new_groups, key=Configurations_group.id_or_max) == sorted(app.group.get_group_list(),key=Configurations_group.id_or_max)
"""def test_modify_group_name(app):
old_groups = app.group.get_group_list()
app.group.modify_first_group(Configurations_group(name="new_name"))
new_groups = app.group.get_group_list()
assert len(old_groups) == len(new_groups)"""
|
Belyanova/Python_-training
|
test/test_user.py
|
<reponame>Belyanova/Python_-training
import re
from model.configurations_user import Configurations_user
import pytest
import allure
def test_user_on_home_page(app, db, json_users):
    """Compare every user row on the home page with its database record.

    Names, e-mails and phone numbers are merged into the single strings the
    home page renders before comparing.
    """
    user = json_users
    with allure.step('Given a non-empty user list'):
        if app.user.count() == 0:
            app.user.add_new_user(user)
    with allure.step('Given a user from home page and db'):
        users_from_home_page = app.user.get_user_list()
        users_from_db = db.get_user_list()
    with allure.step('Then the user from home page is equal to the db '):
        assert len(users_from_home_page) == len(users_from_db)
        #assert sorted(users_from_home_page, key=Configurations_user.id_or_max) == sorted(users_from_db, key=Configurations_user.id_or_max)
        # NOTE(review): the UI side is sorted by id but the db side is taken
        # in query order — confirm the db already returns rows ordered by id,
        # otherwise this row-by-row pairing can mismatch.
        for i in range(len(users_from_home_page)):
            user_from_home_page_by_index = sorted(app.user.get_user_list(), key=Configurations_user.id_or_max)[i]
            user_from_db_by_index = db.get_user_list()[i]
            assert user_from_home_page_by_index.user_name == merge_user_like_on_home_page(user_from_db_by_index)
            assert user_from_home_page_by_index.all_mail == merge_mail_like_on_home_page(user_from_db_by_index)
            assert user_from_home_page_by_index.all_phones_from_home_page == merge_phones_like_on_home_page(user_from_db_by_index)
def clear(s):
    """Drop the characters the contact list view strips from phone numbers."""
    return s.translate(str.maketrans("", "", "()-"))


def merge_phones_like_on_home_page(user):
    """Join a user's phone numbers the way the home page renders them."""
    numbers = [user.phone_home, user.phone_mobile, user.phone_work, user.phone2]
    cleaned = [clear(number) for number in numbers if number is not None]
    return "\n".join(number for number in cleaned if number != "")
def merge_user_like_on_home_page(user):
    """Concatenate last and first name exactly as the home page shows them."""
    parts = [user.last_name, user.firstname]
    return "".join(part for part in parts if part is not None and part != "")
def merge_mail_like_on_home_page(user):
    """Join the user's e-mail addresses with newlines, skipping blanks."""
    mails = [user.mail1, user.mail2, user.mail3]
    return "\n".join(mail for mail in mails if mail is not None and mail != "")
|
Belyanova/Python_-training
|
fixture/user_group.py
|
from selenium.webdriver.support.ui import Select
from model.configurations_user import Configurations_user
from model.configurations_group import Configurations_group
class UserGroupHelper:
    """Selenium helpers for managing user <-> group membership in the UI."""
    def __init__(self, app):
        self.app = app
    def group_selection(self, group):
        # Filter the home-page contact list down to the given group name.
        wd = self.app.wd
        wd.find_element_by_name("group").click()
        Select(wd.find_element_by_name("group")).select_by_visible_text(group)
    def open_users_page(self):
        # Navigate home only if we are not already on the contact list.
        wd = self.app.wd
        if not (wd.current_url.endswith("/index.php") and (wd.find_elements_by_xpath("//form[2]/div[1]/input"))):
            wd.find_element_by_link_text("home").click()
    # Cached result of get_user_in_group_list(); reset by mutating calls.
    user_cache = None
    def get_user_in_group_list(self):
        """Scrape the currently filtered contact rows into model objects."""
        wd = self.app.wd
        if self.user_cache is None:
            wd = self.app.wd
            self.user_cache = []
            for elements in wd.find_elements_by_name("entry"):
                # Column layout assumed: checkbox, last name, first name,
                # address, e-mails, phones — NOTE(review): confirm against
                # the rendered table.
                text = elements.find_elements_by_xpath(".//td")
                firstname = text[2].text
                last_name = text[1].text
                user_name = (text[1].text + text[2].text)
                id = elements.find_element_by_name("selected[]").get_attribute("value")
                all_phones = text[5].text
                address = text[3].text
                all_mail = text[4].text
                self.user_cache.append(Configurations_user(user_name=user_name, id=id,address=address,last_name=last_name,
                                       firstname=firstname, all_mail=all_mail, all_phones_from_home_page=all_phones))
        return list(self.user_cache)
    def del_user_in_group(self):
        # Remove the currently selected users from the active group.
        wd = self.app.wd
        wd.find_element_by_xpath("//input[@name='remove']").click()
        self.user_cache = None
    def user_add_group(self, group):
        # Add the selected users to *group* via the "to_group" dropdown.
        wd = self.app.wd
        wd.find_element_by_name("to_group").click()
        Select(wd.find_element_by_name("to_group")).select_by_visible_text(group)
        wd.find_element_by_name("add").click()
    def select_user_by_id(self, id):
        # Tick the checkbox of the row whose value attribute equals *id*.
        wd = self.app.wd
        self.open_users_page()
        wd.find_element_by_css_selector("input[value='%s']" % id).click()
    def select_user_in_group(self, id):
        # Same as select_user_by_id but without forcing navigation first.
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()
    def select_user_by_index(self, index):
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()
|
Belyanova/Python_-training
|
test/test_user_add_group.py
|
from model.configurations_user import Configurations_user
from model.configurations_group import Configurations_group
import random
import pytest
import allure
def test_user_add_group(app, db):
    """Add a random user to a random group and verify the membership grew."""
    user_new = Configurations_user("User_name2", "name", "Last_name", "Nickname", "Title", "Company", "Address",
                                   "999888777",
                                   "12345678", "87654321", "e-mail_1", "e-mail_2", "e-mail_3", "1", "April", "1998",
                                   "1",
                                   "April", "2000", "Address", "Home", "Notes")
    group_new = Configurations_group("name", "header","footer")
    with allure.step('Given a non-empty user list'):
        if len(db.get_user_list()) == 0:
            app.user.add_new_user(user_new)
            app.user.open_users_page()
        all_user = db.get_user_list()
    with allure.step('Given a non-empty group list'):
        if len(db.get_group_list()) == 0:
            app.group.create(group_new)
        list_group = db.get_group_list()
    with allure.step('Given a random group from the list'):
        group = random.choice(list_group)
    with allure.step('Given a group from the list'):
        app.user.open_users_page()
        app.UserGroup.group_selection(group.name)
        list_user_in_group = app.UserGroup.get_user_in_group_list()
    with allure.step('Given a non-empty user from the group list'):
        # Candidates are users not yet shown in the group's filtered list.
        user_list = [x for x in all_user if x not in list_user_in_group]
        if len(user_list) == 0:
            app.user.add_new_user(user_new)
            all_user = db.get_user_list()
            user_list = [x for x in all_user if x not in list_user_in_group]
    with allure.step('Given a random user from the list'):
        user_add_group = random.choice(user_list)
        app.UserGroup.select_user_by_id(user_add_group.id)
    with allure.step('When I add the user from the group list'):
        app.UserGroup.user_add_group(group.name)
    with allure.step('Then the new user group list is equal to the old list '):
        # BUG FIX: db.get_user_in_group_list() returns membership pairs for
        # *all* groups; count only the pairs belonging to the chosen group,
        # otherwise the +1 comparison against this group's UI list is bogus.
        memberships = [x for x in db.get_user_in_group_list()
                       if x.group_id == group.id]
        assert len(memberships) == len(list_user_in_group) + 1
|
Belyanova/Python_-training
|
test/test_db_matches_ui.py
|
<gh_stars>0
from model.configurations_group import Configurations_group
from timeit import timeit
def test_group_list(app, db):
    """Benchmark fetching the group list via the UI vs. via the database."""
    print(timeit(lambda: app.group.get_group_list(), number=1))
    def clean(group):
        # Normalise db rows the way the UI does (trailing whitespace trimmed).
        return Configurations_group(id=group.id, name=group.name.strip())
    # BUG FIX: map() is lazy, so the old code only timed creating the
    # iterator; wrap in list() to force evaluation and time the actual work.
    print(timeit(lambda: list(map(clean, db.get_group_list())), number=1))
    #sorted(ui_list, key=Configurations_group.id_or_max) == sorted(db_list, key=Configurations_group.id_or_max)
|
Belyanova/Python_-training
|
fixture/db.py
|
import pymysql
from model.configurations_group import Configurations_group
from model.configurations_user import Configurations_user
from model.user_in_group import UserGroup
class DbFixture:
    """Direct pymysql access to the addressbook schema.

    Used by the tests as the source of truth to compare against the UI.
    """
    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        # autocommit so reads always see the latest state written via the UI
        self.connection = pymysql.connect(host=host, database=name , user=user , password=password, autocommit=True )

    def get_group_list(self):
        """Return all groups as Configurations_group objects."""
        records = []  # renamed from ``list`` so the builtin is not shadowed
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
            for row in cursor:
                (group_id, group_name, header, footer) = row
                records.append(Configurations_group(id=str(group_id), name=group_name, header=header, footer=footer))
        finally:
            cursor.close()
        return records

    def get_user_list(self):
        """Return all non-deleted users as Configurations_user objects."""
        records = []
        cursor = self.connection.cursor()
        try:
            # deprecated='0000-00-00 00:00:00' marks rows that are still active
            cursor.execute("select id, firstname, lastname, middlename, nickname, title, company, address, home,"
                           "mobile, work, email, email2, email3, bday, bmonth, byear, aday, amonth, ayear, address2, phone2, notes from addressbook where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (user_id, firstname, lastname, middlename, nickname, title, company, address, home, mobile, work,
                 email, email2, email3, bday, bmonth, byear, aday, amonth, ayear, address2, phone2, notes) = row
                records.append(Configurations_user(id=str(user_id), firstname=firstname, last_name=lastname, middlename=middlename,
                               nickname=nickname, title=title, company=company, address=address, phone_home=home, phone_mobile=mobile,
                               bd_day=str(bday), bd_month=bmonth, bd_year=byear, aday=str(aday), amonth=amonth, ayear=ayear,
                               phone_work=work, mail1=email, mail2=email2, mail3=email3, address2=address2, phone2=phone2, notes=notes))
        finally:
            cursor.close()
        return records

    def get_user_in_group_list(self):
        """Return every (user id, group id) membership pair."""
        records = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, group_id from address_in_groups")
            for row in cursor:
                (user_id, group_id) = row
                records.append(UserGroup(id=str(user_id), group_id=str(group_id)))
        finally:
            cursor.close()
        return records

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()
|
Belyanova/Python_-training
|
test/test_add_user.py
|
<gh_stars>0
# -*- coding: utf-8 -*-
from model.configurations_user import Configurations_user
import pytest
import allure
def test_user(app, db, check_ui, json_users):
    """Add a user from the json fixture and verify it lands in the database.

    NOTE(review): the ``check_ui`` fixture is accepted but unused — confirm
    whether a UI cross-check was intended here.
    """
    user = json_users
    with allure.step('Given a user list'):
        old_users = db.get_user_list()
    with allure.step('When I add a user %s the list' % user):
        app.user.add_new_user(user)
    with allure.step('Then the new user list is equal to the old list with the added user'):
        new_users = db.get_user_list()
        old_users.append(user)
        assert sorted(old_users, key=Configurations_user.id_or_max) == sorted(new_users, key=Configurations_user.id_or_max)
|
Belyanova/Python_-training
|
generator/user.py
|
<reponame>Belyanova/Python_-training<gh_stars>0
from model.configurations_user import Configurations_user
import random
import string
import os.path
import jsonpickle
import getopt
import sys
from random import randint
# Parse command-line options: -n <number of users>, -f <output file>.
try:
    opts, args = getopt.getopt(sys.argv[1:],"n:f:", ["number of users", "file"])
except getopt.GetoptError as err:
    # BUG FIX: the getopt module has no usage() function, so the old error
    # path raised AttributeError; report the parse error ourselves instead.
    print(err, file=sys.stderr)
    sys.exit(2)
n = 5  # default number of users to generate
f = "data/users.json"  # default output path, relative to the project root
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(prefix, maxlen):
    """Return *prefix* followed by 0..maxlen-1 random alphanumeric chars."""
    alphabet = string.ascii_letters + string.digits
    length = random.randrange(maxlen)
    return prefix + "".join(random.choice(alphabet) for _ in range(length))
def random_month():
    """Pick a random English month name (matches the UI dropdown values)."""
    months = ("January", "February", "March", "April", "May", "June",
              "July", "August", "September", "October", "November", "December")
    return random.choice(months)
# Build n random users. Every date component is kept as a string because the
# add-contact form fields expect text.
testdata = [
    Configurations_user(firstname=random_string("firstname", 3), middlename=random_string("middlename", 5),
                        last_name=random_string("last_name", 3), nickname=random_string("nickname", 5),
                        title=random_string("title", 3), company=random_string("company", 5),
                        address=random_string("address", 3), phone_home=random_string("phone_home", 5),
                        phone_mobile=random_string("phone_mobile", 3), phone_work=random_string("phone_work", 5),
                        mail1=random_string("mail1", 3), mail2=random_string("mail2", 5),mail3=random_string("mail3", 3),
                        bd_day=str(randint(1, 31)), bd_month=random_month(),bd_year=str(randint(1900, 2100)),
                        aday=str(randint(1, 31)),amonth=random_month(),
                        # BUG FIX: ayear was the only date field left as an
                        # int; keep it a string like aday/bd_year.
                        ayear=str(randint(1900, 2100)),
                        address2=random_string("address2", 5), phone2=random_string("phone2", 3),notes = random_string("notes", 5))
    for i in range(n)
]
# Serialise the generated users as JSON relative to the project root.
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
|
Belyanova/Python_-training
|
bdd/user/user_steps.py
|
<gh_stars>0
from pytest_bdd import given, when, then
from model.configurations_user import Configurations_user
import random
@given('a user list', target_fixture="user_list")
def user_list(db):
    # "Before" snapshot of users pulled straight from the database.
    return db.get_user_list()
@given('a user', target_fixture="new_user")
def new_user():
    # Fixed contact used by the add-user scenario; positional argument order
    # follows Configurations_user.__init__.
    return(Configurations_user("User_name", "name", "Last_name", "Nickname", "Title", "Company", "Address", "999888777",
                               "12345678", "87654321", "e-mail_1", "e-mail_2", "e-mail_3", "1", "April", "1998", "1",
                               "April", "2000", "Address", "Home", "Notes"))
@when('I add the user the list')
def add_new_user(app, new_user):
    app.user.add_new_user(new_user)
@then('the new user list is equal to the old list with the added user')
def verify_user_added(db, user_list, new_user):
    # Exactly one new user must be present, and the lists must match by id.
    old_users = user_list
    new_users = db.get_user_list()
    assert len(new_users) == len(old_users)+1
    old_users.append(new_user)
    assert sorted(new_users, key=Configurations_user.id_or_max) == sorted(old_users, key=Configurations_user.id_or_max)
@given('a non-empty user list', target_fixture="non_empty_user_list")
def non_empty_user_list(app, db):
    # Guarantee at least one user exists before delete/edit scenarios run.
    if len(db.get_user_list()) == 0:
        app.user.add_new_user(Configurations_user("User_name", "name", "Last_name", "Nickname", "Title", "Company", "Address", "999888777",
                              "12345678", "87654321", "e-mail_1", "e-mail_2", "e-mail_3", "1", "April", "1998", "1",
                              "April", "2000", "Address", "Home", "Notes"))
    return db.get_user_list()
@given('a random user from the list', target_fixture="random_user")
def random_user(non_empty_user_list):
    return random.choice(non_empty_user_list)
@when('I delete the user from the list')
def delete_user(app, random_user):
    app.user.delete_user_by_id(random_user.id)
@then('the new user list is equal to the old list without the deleted user')
def verify_user_dell(db, non_empty_user_list, random_user, app, check_ui):
    # Compare db state before/after; optionally cross-check against the UI.
    old_users = non_empty_user_list
    new_users = db.get_user_list()
    assert len(old_users) - 1 == len(new_users)
    old_users.remove(random_user)
    assert old_users == new_users
    if check_ui:
        assert sorted(new_users, key=Configurations_user.id_or_max) == sorted(app.user.get_user_list(),
                                                                              key=Configurations_user.id_or_max)
@when('I edit the user from the list')
def edit_user(app, random_user):
    """Re-submit the chosen user's own data through the edit form."""
    app.user.edit_user_by_id(random_user.id, random_user)
@then('the new user list is equal to the old list without the edit user')
def verify_user_edited(db, non_empty_user_list, app, check_ui):
    """Editing a user in place must leave the user list unchanged.

    BUG FIX: renamed from ``verify_user_dell`` — the old name silently
    shadowed the delete-scenario step function defined above in this module
    (pytest_bdd registration still worked via the decorator, but the module
    namespace held only one of the two functions).
    """
    old_users = non_empty_user_list
    assert len(old_users) == app.user.count()
    new_users = db.get_user_list()
    assert old_users == new_users
    if check_ui:
        assert sorted(new_users, key=Configurations_user.id_or_max) == sorted(app.user.get_user_list(),
                                                                              key=Configurations_user.id_or_max)
|
Belyanova/Python_-training
|
check_db_connection.py
|
<gh_stars>0
import pymysql
from fixture.orm import ORM_fixture
from model.configurations_group import Configurations_group
# Ad-hoc smoke-test script: connect to the local addressbook database and
# print every user that is NOT a member of group 60. Run manually; this is
# not part of the automated test suite.
db = ORM_fixture (host="127.0.0.1", name="addressbook", user="root", password="")
try:
    l = db.get_users_not_in_group(Configurations_group(id="60"))
    for item in l:
        print(item)
    print(len(l))
finally:
    # NOTE(review): ORM_fixture defines no destroy() — presumably why the
    # call below is commented out; confirm whether cleanup is needed.
    pass #db.destroy()
|
Belyanova/Python_-training
|
fixture/user.py
|
from selenium.webdriver.support.ui import Select
from model.configurations_user import Configurations_user
import re
class UserHelper:
def __init__(self, app):
self.app = app
def open_users_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/index.php") and (wd.find_elements_by_xpath("//form[2]/div[1]/input"))):
wd.find_element_by_link_text("home").click()
def open_add_user_page(self):
wd = self.app.wd
if not wd.find_elements_by_name("photo"):
wd.find_element_by_link_text("add new").click()
return wd
def save_user(self):
wd = self.app.wd
wd.find_element_by_xpath('(//input[@name="submit"])[2]').click()
def fill_in_user(self, configurations_user):
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(configurations_user.firstname)
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(configurations_user.middlename)
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(configurations_user.last_name)
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(configurations_user.nickname)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(configurations_user.title)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(configurations_user.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(configurations_user.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(configurations_user.phone_home)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(configurations_user.phone_mobile)
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(configurations_user.phone_work)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(configurations_user.mail1)
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(configurations_user.mail2)
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(configurations_user.mail3)
wd.find_element_by_name("bday").click()
wd.find_element_by_name("bday").send_keys(configurations_user.bd_day)
wd.find_element_by_name("bmonth").click()
Select(wd.find_element_by_name("bmonth")).select_by_visible_text(configurations_user.bd_month)
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(configurations_user.bd_year)
wd.find_element_by_name("aday")
wd.find_element_by_name("aday").send_keys(configurations_user.aday)
wd.find_element_by_name("amonth").click()
Select(wd.find_element_by_name("amonth")).select_by_visible_text(configurations_user.amonth)
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(configurations_user.ayear)
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(configurations_user.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(configurations_user.phone2)
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(configurations_user.notes)
def add_new_user(self, configurations_user):
wd = self.app.wd
self.open_add_user_page()
self.fill_in_user(configurations_user)
wd.find_element_by_xpath('(//input[@name="submit"])[2]').click()
self.return_start_page()
self.user_cache = None
def select_first_user(self):
wd = self.app.wd
self.select_user_by_index(0)
def select_user_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def delete_first_user(self):
self.delete_user_by_index(0)
def delete_user_by_index(self,index):
wd = self.app.wd
# выбрать первый контакт
wd.find_element_by_link_text("home").click()
self.select_user_by_index(index)
wd.find_element_by_xpath('//input[@value="Delete"]').click()
wd.switch_to_alert().accept()
wd.find_element_by_css_selector("div.msgbox")
self.user_cache = None
def edit_first_user(self):
self.edit_user_by_index(0)
def open_edit_user_by_index(self, index):
wd = self.app.wd
self.open_users_page()
self.select_user_by_index(index)
wd.find_elements_by_xpath('//img[@alt="Edit"]')[index].click()
def open_view_user_by_index(self, index):
wd = self.app.wd
self.open_users_page()
self.select_user_by_index(index)
wd.find_elements_by_xpath("//img[@alt='Details']")[index].click()
def edit_user_by_index(self, index, configurations_user):
wd = self.app.wd
self.open_edit_user_by_index(index)
self.fill_in_user(configurations_user)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.user_cache = None
def return_start_page(self):
wd = self.app.wd
if wd.find_element_by_link_text("home page"):
wd.find_element_by_link_text("home page").click()
wd.find_element_by_link_text("Logout")
def count(self):
wd = self.app.wd
self.open_users_page()
return len(wd.find_elements_by_name("selected[]"))
user_cache = None
def get_user_list(self):
if self.user_cache is None:
wd = self.app.wd
self.open_users_page()
self.user_cache = []
for elements in wd.find_elements_by_name("entry"):
text = elements.find_elements_by_xpath(".//td")
firstname = text[2].text
last_name = text[1].text
user_name = (text[1].text + text[2].text)
id = elements.find_element_by_name("selected[]").get_attribute("value")
all_phones = text[5].text
address = text[3].text
all_mail = text[4].text
self.user_cache.append(Configurations_user(user_name=user_name, id=id,address=address,last_name=last_name,
firstname=firstname, all_mail=all_mail, all_phones_from_home_page=all_phones))
return list(self.user_cache)
def get_user_info_from_edit_page(self, index):
wd = self.app.wd
self.open_edit_user_by_index(index)
id = wd.find_element_by_name("id").get_attribute("value")
last_name = wd.find_element_by_name("lastname").get_attribute("value")
firstname = wd.find_element_by_name("firstname").get_attribute("value")
phone_home = wd.find_element_by_name("home").get_attribute("value")
phone_mobile = wd.find_element_by_name("mobile").get_attribute("value")
phone_work = wd.find_element_by_name("work").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
mail1 = wd.find_element_by_name("email").get_attribute("value")
mail2 = wd.find_element_by_name("email2").get_attribute("value")
mail3 = wd.find_element_by_name("email3").get_attribute("value")
return Configurations_user(firstname=firstname, last_name=last_name, phone2=phone2,address=address,
phone_home=phone_home, phone_mobile=phone_mobile,phone_work=phone_work, id=id,
mail1=mail1, mail2=mail2, mail3=mail3)
def get_user_from_view_page(self, index):
wd = self.app.wd
self.open_view_user_by_index(index)
text = wd.find_element_by_id("content").text
phone_home = re.search("H: (.*)", text).group(1)
phone_mobile = re.search("M: (.*)", text).group(1)
phone_work = re.search("W: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Configurations_user(phone2=phone2, phone_home=phone_home, phone_mobile=phone_mobile, phone_work=phone_work)
def delete_user_by_id(self,id):
wd = self.app.wd
# выбрать первый контакт
wd.find_element_by_link_text("home").click()
self.select_user_by_id(id)
wd.find_element_by_xpath('//input[@value="Delete"]').click()
wd.switch_to_alert().accept()
wd.find_element_by_css_selector("div.msgbox")
self.user_cache = None
def select_user_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def open_user_to_edit_by_id(self, id):
    """Navigate from the home page to the edit form of the contact with *id*."""
    wd = self.app.wd
    self.app.open_home_page()
    # the pencil icon links to edit.php?id=<id> inside the main table
    table_body = wd.find_element_by_xpath("//*[@id='maintable']/tbody")
    table_body.find_element_by_xpath("//*[@href='edit.php?id=%s']" % id).click()
def edit_user_by_id(self, id, configurations_user):
    """Open the edit form of the contact with *id*, overwrite its fields and save.

    :param id: database id of the contact to edit.
    :param configurations_user: object carrying the new field values,
        passed through to fill_in_user().
    """
    wd = self.app.wd
    self.open_user_to_edit_by_id(id)
    self.fill_in_user(configurations_user)
    wd.find_element_by_name("update").click()
    # return to the contact list via the "home page" link shown after saving
    wd.find_element_by_link_text("home page").click()
    # Bug fix: this class caches the contact list in user_cache (read in
    # get_user_list, reset in delete_user_by_id); resetting contact_cache
    # here left a stale user_cache behind after an edit.
    self.user_cache = None
|
Belyanova/Python_-training
|
test/test_edit_user.py
|
<filename>test/test_edit_user.py
# -*- coding: utf-8 -*-
from model.configurations_user import Configurations_user
import re
import random
import pytest
import allure
def test_case(app, db, check_ui, json_users):
    """Edit a randomly chosen contact (re-saving its own data) and verify
    that the contact list in the database is unchanged."""
    user = json_users
    with allure.step('Given a non-empty user list'):
        if len(db.get_user_list()) == 0:
            app.user.add_new_user(user)
        old_users = db.get_user_list()
    with allure.step('Given a random user from the list'):
        user = random.choice(old_users)
    with allure.step('When I edit the user from the list'):
        # the user is re-saved with its own data, so the list must not change
        app.user.edit_user_by_id(user.id, user)
    # Bug fix: the original step text ("...the old list without the edit
    # user") was copy-pasted from the delete test and described the wrong
    # expectation — the assertions below check that the list is unchanged.
    with allure.step('Then the user list is unchanged'):
        assert len(old_users) == app.user.count()
        new_users = db.get_user_list()
        assert sorted(old_users, key=Configurations_user.id_or_max) == sorted(new_users, key=Configurations_user.id_or_max)
def clear(s):
    """Strip phone-formatting characters — parentheses, dashes and spaces —
    from *s*, leaving everything else (digits, '+', letters) untouched."""
    return "".join(ch for ch in s if ch not in "() -")
|
Belyanova/Python_-training
|
test/test_del_user.py
|
# -*- coding: utf-8 -*-
from model.configurations_user import Configurations_user
import random
import pytest
import allure
def test_delete_first_group(app, db, check_ui, json_users):
user = json_users
with allure.step('Given a non-empty user list'):
if len(db.get_user_list()) == 0:
app.user.add_new_user(user)
old_users = db.get_user_list()
with allure.step('Given a random user from the list'):
user = random.choice(old_users)
with allure.step('When I delete the user from the list'):
app.user.delete_user_by_id(user.id)
with allure.step('Then the new user list is equal to the old list without the deleted user'):
assert len(old_users) - 1 == app.user.count()
new_users = db.get_user_list()
old_users .remove(user)
assert sorted(new_users , key=Configurations_user.id_or_max) == sorted(old_users, key=Configurations_user.id_or_max)
|
Belyanova/Python_-training
|
data/users.py
|
from model.configurations_user import Configurations_user
# Default contact fixture: one fully populated record (names, phones,
# e-mails, what appear to be birthday/anniversary day-month-year triples,
# a secondary address and notes) used to seed the application under test.
# NOTE(review): all arguments are positional — confirm their order against
# the Configurations_user constructor signature.
user = Configurations_user("User_name", "name", "Last_name", "Nickname", "Title", "Company", "Address", "999888777",
                           "12345678", "87654321", "e-mail_1", "e-mail_2", "e-mail_3", "1", "April", "1998", "1",
                           "April", "2000", "Address", "Home", "Notes")
|
Belyanova/Python_-training
|
data/groups.py
|
from model.configurations_group import Configurations_group
# Parametrisation data for the group tests: two distinct group fixtures so
# parametrised tests run against more than one input.
testdata = [
    Configurations_group(name="name1", header="header1", footer="footer1"),
    Configurations_group(name="name2", header="header2", footer="footer2"),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.