# Python source files from a dataset dump; each file's path, repository, and license appear in the "===" header lines below.
# === myproject/products/views.py (abhishekmorya/django-rest-assignment, MIT) ===
from rest_framework import viewsets, filters, mixins
from rest_framework.response import Response
from products import models
from products import serializers


class CategoryApiView(viewsets.ReadOnlyModelViewSet):
"""API View for Category"""
serializer_class = serializers.CategorySerializer
queryset = models.Category.objects.all()


class SubCategoryApiView(viewsets.ReadOnlyModelViewSet):
"""API View for SubCategory"""
serializer_class = serializers.SubCategorySerializer
queryset = models.SubCategory.objects.all().order_by('-created_on')

    def retrieve(self, request, pk):
        category = models.Category.objects.filter(
            name=pk
        ).first()
        if category is None:
            return Response("Error: Category not found", 404)
        queryset = models.SubCategory.objects.filter(category=category.id)
values = [x.to_dict() for x in queryset]
return Response(values)


class ProductApiView(mixins.ListModelMixin, mixins.RetrieveModelMixin,
                     mixins.CreateModelMixin, viewsets.GenericViewSet):
"""API View for Product"""
serializer_class = serializers.ProductSerializer
queryset = models.Product.objects.all().order_by('-created_on')


class ProductSubCategoryView(viewsets.ReadOnlyModelViewSet):
"""Products for a sub-category"""
serializer_class = serializers.ProductSerializer
queryset = models.Product.objects.all()

    def retrieve(self, request, pk):
        sub_category = models.SubCategory.objects.filter(
            name=pk
        ).first()
if sub_category is None:
return Response("Error: Sub Category not found", 404)
        queryset = models.Product.objects.filter(sub_category=sub_category.id)
values = [x.to_dict() for x in queryset]
return Response(values)


class ProductCategoryView(viewsets.ReadOnlyModelViewSet):
"""Product for a category"""
serializer_class = serializers.ProductSerializer
queryset = models.Product.objects.all()

    def retrieve(self, request, pk):
        category = models.Category.objects.filter(
            name=pk
        ).first()
if category is None:
return Response("Error: Category Not Found", 404)
        q = self.queryset.filter(
            category=category.id
        )
values = [x.to_dict() for x in q]
        return Response(values)
# === R function.py (haideraheem/Simple-Python-Programs, CC0-1.0) ===
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 6 17:11:11 2018
@author: Haider Raheem
"""
a = float(input("Type a : "))
b = float(input("Type b : "))
c = float(input("Type c : "))
x = (a*b) + (b*c) + (c*a)
y = a + b + c
r = x/y
print()
print("The result of the calculation is {0:.2f}".format(r))
# === src/selectedtests/app/controllers/health_controller.py (mongodb/selected-tests, Apache-2.0) ===
"""Controller for the health endpoints."""
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter()


class HealthCheckResponse(BaseModel):
    """Model for health check responses."""

    online: bool


@router.get("", response_model=HealthCheckResponse, description="Health check endpoint")
def health() -> HealthCheckResponse:
"""Get the current status of the service."""
return HealthCheckResponse(online=True)
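# Wiring sketch (hypothetical; the actual mount point lives in the service's app factory):
# from fastapi import FastAPI
# app = FastAPI()
# app.include_router(router, prefix="/health")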
# === Client/message/friend.py (Ricky-Hao/IMPK-Client, MIT) ===
from .base import BaseMessage


class FriendMessage(BaseMessage):
def __init__(self, data=None):
super().__init__(data)
def _init_type(self):
self.type = 'FriendMessage'
def _parse_dict(self, data):
super()._parse_dict(data)
self.friend_list = data.get('friend_list')
def to_dict(self):
data = super().to_dict()
data['friend_list'] = self.friend_list
return data


class FriendRequestMessage(BaseMessage):
def __init__(self, data=None):
super().__init__(data)
def _init_type(self):
self.type = 'FriendRequestMessage'
def _parse_dict(self, data):
super()._parse_dict(data)
self.friend_name = data.get('friend_name')
def to_dict(self):
data = super().to_dict()
data['friend_name'] = self.friend_name
return data


class FriendAcceptMessage(BaseMessage):
def __init__(self, data=None):
super().__init__(data)
def _init_type(self):
self.type = 'FriendAcceptMessage'
def _parse_dict(self, data):
super()._parse_dict(data)
self.friend_name = data.get('friend_name')
self.accept = data.get('accept')
def to_dict(self):
data = super().to_dict()
data['friend_name'] = self.friend_name
data['accept'] = self.accept
return data


class FriendUpdateMessage(BaseMessage):
def __init__(self, data=None):
super().__init__(data)
def _init_type(self):
        self.type = 'FriendUpdateMessage'
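# Round-trip sketch (BaseMessage is not shown here; this assumes its __init__ stores the
# common fields and calls _init_type() plus _parse_dict(data) when data is provided):
# req = FriendRequestMessage({'friend_name': 'alice'})
# req.to_dict()  # -> {..., 'friend_name': 'alice'}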
# === Pyon exercicios/Exercicios/066.py (alefbispo/Exercicios-do-curso-de-Python, MIT) ===
'''Create a program that reads several integers.
The program only stops when the user types 999.
At the end, show the sum of the values and how many were typed.'''
soma = total = 0
while True:
    valor = int(input('## 999 to stop ###\n'
                      'Type a value: '))
if valor == 999:
break
total += 1
soma += valor
print(f'You typed {total} values and the sum of them is {soma}')
# === bookorbooks/account/migrations/0008_auto_20210714_1507.py (talhakoylu/SummerInternshipBackend, MIT) ===
# Generated by Django 3.2.5 on 2021-07-14 15:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0007_alter_childlist_child'),
]
operations = [
migrations.AlterField(
model_name='childprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='user_child', serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='Kullanıcı'),
),
migrations.AlterField(
model_name='parentprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='user_parent', serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='Kullanıcı'),
),
]
# === src/subpackages/ts_process/shape.py (cfiscella/PredictionIntervals, CC0-1.0) ===
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, StandardScaler


def shape_data(df, window):
    '''
    Returns a NumPy array of dimension (len(df) - window + 1, window, len(df.columns)).
    Creates features from lags of the time series to be used for modeling.

    Parameters:
        df (DataFrame): A Pandas DataFrame
        window (int): Length of lookback window
    Returns:
        result (ndarray): NumPy array of dimension (len(df) - window + 1, window, len(df.columns)).
    '''
win_t = window
ds = []
for ind in range(win_t, len(df)+1):
ds.append(df.iloc[ind-win_t:ind,:].to_numpy())
result = np.array(ds)
return result
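# Quick shape check (made-up data): a length-4 series with a window of 2 yields
# 3 overlapping windows.
# df = pd.DataFrame({'a': [1, 2, 3, 4]})
# shape_data(df, 2).shape  # -> (3, 2, 1): windows [1,2], [2,3], [3,4]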


def dissolve(shaped_data, window):
    '''
    Inverse of shape_data. Returns the pre-shaped, unwindowed data as a NumPy array.

    Parameters:
        shaped_data (list like): Shaped data of dimension (n_samples, window, features)
        window (int): Length of lookback window
    Returns:
        result (ndarray): NumPy array of dimension (n_samples + window - 1, features).
    '''
ds = []
for i in range(window):
ds.append(shaped_data[0][i])
for j in range(1,len(shaped_data)):
ds.append(shaped_data[j][-1])
result = np.array(ds)
return result
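# Round-trip property: dissolve undoes shape_data exactly, so for any 1 <= w <= len(df)
# np.array_equal(dissolve(shape_data(df, w), w), df.to_numpy())  # -> True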


class WindowGenerator:
"""
A class to format time series for modeling.
...
Attributes
----------
    data : DataFrame
        raw time series used to build inputs and targets
    input_list : list
        columns of data used as model inputs
    target_list : list
        columns of data used as prediction targets
target_columns_indices : list
list of indicies of target columns to predict
column_indices : list
list of indicies of features
input_length : int
length of look-back window
target_length : int
length of consecutive target days predicted
shift : int
number of samples between when look-back period ends and target begins
total_window_size : int
input_length+shift
input_slice : slice
slice object to index input data
input_indices : ndarray
array of input indices
target_start : int
index of where target data begins
targets_slice : slice
slice object to index target data
target_indices : ndarray
array of target indices
inputs : ndarray
split and shaped inputs from data
targets : ndarray
split and shaped targets from data
train : (ndarray,ndarray)
(input,target) tuple for model training
val : (ndarray,ndarray)
(input,target) tuple for model validation
test : (ndarray,ndarray)
(input,target) tuple for model testing
Methods
-------
    data_split(self, split=(.6, .2, .2), val_behind=True, standardization="None"):
Splits inputs and targets into train, test and val sets.
"""

    def __init__(self, data, input_length, shift, target_length, input_list, target_list):
# Store the raw data.
"""
        Constructs the minimum necessary attributes for the WindowGenerator object.
Parameters
----------
data : DataFrame
raw time series DataFrame including inputs and targets used to construct inputs and targets for modeling
input_length : int
length of look-back window
shift : int
number of time steps between last input value and corresponding first target
target_length : int
length of target array per sample
input_list : list
list of columns in data to be used as inputs
target_list : list
list of columns in data to be used as targets
"""
self.data = data
self._train = None
self._val = None
self._test = None
self.input_list = input_list
# Work out the label column indices.
self.target_list = target_list
if target_list is not None:
self.target_columns_indices = {name: i for i, name in
enumerate(target_list)}
self.column_indices = {name: i for i, name in
enumerate(data.columns)}
# Work out the window parameters.
self.input_length = input_length
self.target_length = target_length
self.shift = shift
self.total_window_size = input_length + shift
self.input_slice = slice(0, input_length)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
self.target_start = self.total_window_size - self.target_length
self.targets_slice = slice(self.target_start, None)
self.target_indices = np.arange(self.total_window_size)[self.targets_slice]
###initialize targets and inputs which will be arrays of arrays
self._targets =None
self._inputs = None
def split_window(self, features):
###private method used to generate input and target arrays
inputs = shape_data(features[self.input_list],self.input_length)
labels = shape_data(features[self.target_list][self.input_length+self.shift:],self.target_length)
        # trim so that inputs and labels have the same length
if len(inputs)>len(labels):
inputs = inputs[:len(labels)]
else:
labels = labels[:len(inputs)]
self._inputs = inputs
self._targets = labels
return inputs, labels
@property
def inputs(self):
if type(self._inputs) == type(None):
return self.split_window(self.data)[0]
else:
return self._inputs
@inputs.setter
def inputs(self,data):
self._inputs = self.split_window(data)[0]
@property
def targets(self):
if type(self._targets) == type(None):
return self.split_window(self.data)[1]
else:
return self._targets
@property
def train(self):
return self._train
@train.setter
def train(self, indecies):
self._train = self.inputs[indecies[0]:indecies[1],:,:],self.targets[indecies[0]:indecies[1],:,:]
@property
def val(self):
return self._val
@val.setter
def val(self, indecies):
self._val = self.inputs[indecies[0]:indecies[1],:,:],self.targets[indecies[0]:indecies[1],:,:]
@property
def test(self):
return self._test
@test.setter
def test(self, indecies):
self._test = self.inputs[indecies[0]:indecies[1],:,:],self.targets[indecies[0]:indecies[1],:,:]

    def data_split(self, split=(.6, .2, .2), val_behind=True, standardization="None"):
"""
Splits data into train, test and (optionally) validation sets and assigns them as attributes.
If the argument 'val_behind' is True, then validation attribute is assigned.
If the argument 'standardization' is specified, data will standardized by the specified method and transofrmed.
Parameters
----------
split : list
Split between training set, testing set and validation set
val_behind : bool
Indicates whether validation set will be behind or infront of training set
standardization : string
Indicates standardization method used
Returns
-------
None
-------
"""
val_length = int(len(self.data)*split[2])
train_length = int(len(self.data)*split[0])
test_length = int(len(self.data)*split[1])
if val_behind:
val_slice = [0,val_length]
train_slice = [val_length,val_length+train_length]
test_slice = [val_length+train_length,val_length+train_length+test_length]
else:
train_slice = [0,train_length]
val_slice = [train_length,val_length+train_length]
test_slice = [val_length+train_length,val_length+train_length+test_length]
if standardization == "None":
self.val = val_slice
self.train = train_slice
self.test = test_slice
elif standardization == "MinMax":
minmax = MinMaxScaler()
minmax.fit(self.data[self.input_list].iloc[train_slice[0]:train_slice[1]])
target = self.data["target"].reset_index()
self.scaler = minmax
scaled_data = pd.concat([pd.DataFrame(self.scaler.transform(self.data[self.input_list]),
columns = self.input_list),target],
axis = 1,).iloc[:-1,:]
self.inputs = scaled_data
self.val = val_slice
self.train = train_slice
self.test = test_slice
elif standardization == "Standard":
standard = StandardScaler()
standard.fit(self.data[self.input_list].iloc[train_slice[0]:train_slice[1]])
target = self.data["target"].reset_index()
self.scaler = standard
scaled_data = pd.concat([pd.DataFrame(self.scaler.transform(self.data[self.input_list]),
columns = self.input_list),target],
axis = 1,).iloc[:-1,:]
self.inputs = scaled_data
self.val = val_slice
self.train = train_slice
self.test = test_slice
return None
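# Usage sketch (hypothetical column names; assumes data carries both the input features
# and a 'target' column, as the standardization branches above require):
# wg = WindowGenerator(data=df, input_length=30, shift=1, target_length=1,
#                      input_list=['feat1', 'feat2'], target_list=['target'])
# wg.data_split(split=[.6, .2, .2], standardization="MinMax")
# X_train, y_train = wg.train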
# === python_3/aula21c.py (felipesch92/CursoEmVideo, MIT) ===
# Main program
def teste():
    x = 8
    print(f'In the function teste, n is {n}')
    print(f'In the function teste, x is {x}')


n = 2
print(f'In the main program, n is {n}')
teste()
print(f'In the main program, x is {x}')  # NameError: x is local to teste() and does not exist in the main program
# === test/test_conf.py (nandub/Limnoria, BSD-3-Clause) ===
##
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.conf as conf
import supybot.registry as registry
import supybot.ircutils as ircutils


class SupyConfTestCase(SupyTestCase):
def testJoinToOneChannel(self):
orig = conf.supybot.networks.test.channels()
channels = ircutils.IrcSet()
channels.add("#bar")
conf.supybot.networks.test.channels.setValue(channels)
msgs = conf.supybot.networks.test.channels.joins()
self.assertEqual(msgs[0].args, ("#bar",))
conf.supybot.networks.test.channels.setValue(orig)

    def testJoinToManyChannels(self):
orig = conf.supybot.networks.test.channels()
channels = ircutils.IrcSet()
input_list = []
for x in range(1, 30):
name = "#verylongchannelname" + str(x)
channels.add(name)
input_list.append(name)
conf.supybot.networks.test.channels.setValue(channels)
msgs = conf.supybot.networks.test.channels.joins()
# Double check we split the messages
self.assertEqual(len(msgs), 2)
# Ensure all channel names are present
chan_list = (msgs[0].args[0] + ',' + msgs[1].args[0]).split(',')
self.assertCountEqual(input_list, chan_list)
conf.supybot.networks.test.channels.setValue(orig)
# === app/model.py (tai-calg/gameserver, MIT) ===
# flake8: noqa
import json
import uuid
from enum import Enum, IntEnum
from typing import Optional

from fastapi import HTTPException
from pydantic import BaseModel
from sqlalchemy import text
from sqlalchemy.exc import NoResultFound

from .db import engine


class InvalidToken(Exception):
    """Raised when the given token is invalid."""


class SafeUser(BaseModel):
    """A User without the token field."""
id: int
name: str
leader_card_id: int
class Config:
orm_mode = True


class LiveDifficulty(IntEnum):
normal = 1
hard = 2


class JoinRoomResult(IntEnum):
    Ok = 1
    RoomFull = 2
    Disbanded = 3
    OtherError = 4


class WaitRoomStatus(IntEnum):
    Waiting = 1
    LiveStart = 2
    Dissolution = 3


class RoomInfo(BaseModel):
room_id: int
live_id: int
joined_user_count: int
max_user_count: int
class Config:
orm_mode = True


class RoomUser(BaseModel):
    user_id: int
    user_name: str
    leader_card_id: int
    select_difficulty: LiveDifficulty
    is_me: bool
    is_host: bool

    class Config:
        orm_mode = True


class ResultUser(BaseModel):
user_id: int
judge_count_list: list[int]
score: int


MAX_USER_COUNT = 4


def create_user(name: str, leader_card_id: int) -> str:
"""Create new user and returns their token"""
token = str(uuid.uuid4())
    # NOTE: if the token collides, creation needs to be retried.
with engine.begin() as conn:
result = conn.execute(
text(
"INSERT INTO `user` (name, token, leader_card_id) VALUES (:name, :token, :leader_card_id)"
),
{"name": name, "token": token, "leader_card_id": leader_card_id},
)
# print(result)
return token


def _get_user_by_token(conn, token: str) -> Optional[SafeUser]:
    result = conn.execute(
text("SELECT `id`, `name`, `leader_card_id` FROM user WHERE `token` =:token"),
dict(token=token),
)
try:
        res = result.one()
except NoResultFound:
return None
return SafeUser.from_orm(res)


def get_user_by_token(token: str) -> Optional[SafeUser]:
with engine.begin() as conn:
return _get_user_by_token(conn, token)


def update_user(_token: str, _name: str, _leader_card_id: int) -> None:
    # this code is implemented as the assignment exercise
with engine.begin() as conn:
# user = _get_user_by_token(conn, token)
conn.execute(
text(
""" UPDATE user SET name = :name,
leader_card_id = :leader_card_id WHERE token = :token """
),
dict(name=_name, leader_card_id=_leader_card_id, token=_token),
)
return None


def create_room(liveid: int, select_difi: LiveDifficulty):
    """Create a new room."""
    token = str(uuid.uuid4())  # create a token so that rooms with identical settings can still be created
with engine.begin() as conn:
result = conn.execute(
text("""INSERT INTO `room` (select_difficulty , live_id, token, joined_user_count, max_user_count)
VALUES (:select_difficulty, :live_id, :token, :joined_user_count, :max_user_count)"""),
dict(live_id=liveid, select_difficulty=int(select_difi), token=token, \
joined_user_count=0, max_user_count=MAX_USER_COUNT),
)
        # register the host into room_user; the host's user info is not in scope here,
        # so this statement has no bound parameters yet (see create_user_info below)
        # conn.execute(
        #     text("""INSERT INTO `room_user` (user_id, user_name, leader_card_id, select_difficulty, is_me, is_host, room_id)
        #     VALUE (:user_id, :user_name, :leader_card_id, :select_difficulty, :is_me, :is_host, :room_id) """)
        # )
return


def get_last_insert_id() -> int:
with engine.begin() as conn:
result = conn.execute(
text("SELECT LAST_INSERT_ID()"),
)
return result.scalar()
"""
room_host = create_user_info(True, select_difi )
insert_user_info(room_host,)
"""


def get_room_list(live_id: int) -> list[RoomInfo]:
with engine.begin() as conn:
result = conn.execute(
text("SELECT room_id, live_id, joined_user_count, max_user_count FROM room WHERE live_id = :live_id "),
dict(live_id=live_id),
)
return [RoomInfo.from_orm(res) for res in result]


def api_join_room(room_id: int, select_difi: int, user_token: str) -> JoinRoomResult:
    # build the RoomUser for the joining player here
with engine.begin() as conn:
result = conn.execute(
text(""" SELECT joined_user_count FROM room WHERE room_id = :room_id """),
dict(room_id=room_id),
) # get number of joined user
num_people = result.one()[0]
        if num_people == 0:
            conn.execute(
                text(""" UPDATE room SET joined_user_count = joined_user_count + 1 WHERE room_id = :room_id"""),  # suspicious
                dict(room_id=room_id),
            )
            room_user: RoomUser = create_user_info(is_host=True, select_difi=select_difi, user_token=user_token)
            insert_user_info(room_user, room_id)
            return JoinRoomResult.Ok
        elif num_people < 4:
            conn.execute(
                text(""" UPDATE room SET joined_user_count = joined_user_count + 1 WHERE room_id = :room_id"""),  # suspicious
                dict(room_id=room_id),
            )
            room_user = create_user_info(is_host=False, select_difi=select_difi, user_token=user_token)
            insert_user_info(room_user, room_id)
            return JoinRoomResult.Ok
        else:
            return JoinRoomResult.RoomFull  # TODO: code the Disbanded and OtherError cases later


def insert_user_info(userinfo: RoomUser, room_id: int) -> None:
with engine.begin() as conn:
result = conn.execute(
text("""INSERT INTO `room_user` (user_id, room_id, leader_card_id , select_difficulty, is_me, is_host)
VALUES (:user_id, :room_id, :leader_card_id, :select_difficulty, :is_me, :is_host)"""),
dict(user_id=userinfo.user_id, room_id=room_id, leader_card_id = userinfo.leader_card_id , \
select_difficulty=userinfo.select_difficulty, \
is_me = userinfo.is_me, is_host=userinfo.is_host),
)
return


def create_user_info(is_host: bool, select_difi: int, user_token: str) -> RoomUser:
    """Fetch the user row by token and turn it into a RoomUser."""
with engine.begin() as conn:
result = conn.execute(
text("""SELECT id, name ,leader_card_id FROM user WHERE token = :token"""),
dict(token=user_token),
)
        res = result.one()  # token is unique, so exactly one row
return RoomUser(user_id=res[0], user_name=res[1], leader_card_id=res[2],select_difficulty=select_difi, is_me=True, is_host=is_host)


def pooling_wait(room_id: int) -> WaitRoomStatus:  # DOING
    """When the host presses the start button, the game starts and the enum status changes; non-host players poll for that change through this function."""
with engine.begin() as conn:
result = conn.execute(
text("""SELECT wait_status FROM room WHERE room_id = :room_id"""),
dict(room_id=room_id),
)
if result.one()[0] == int(WaitRoomStatus.LiveStart):
return WaitRoomStatus.LiveStart
else:
return WaitRoomStatus.Waiting


def get_room_user_list(room_id: int):
with engine.begin() as conn:
result = conn.execute(
            text("""SELECT user_name, leader_card_id, select_difficulty
            FROM room_user WHERE room_id = :room_id"""),
dict(room_id=room_id),
)
return [RoomUser.from_orm(res) for res in result]


def start_game(room_id: int) -> None:
with engine.begin() as conn:
gamestart = int(WaitRoomStatus.LiveStart)
result = conn.execute(
text("""UPDATE room SET wait_status = :gamestart WHERE room_id = :room_id"""),
dict(gamestart = gamestart ,room_id=room_id),
)
    return
# === gwlfe/StreamBank.py (rajadain/gwlf-e, Apache-2.0) ===
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Imported from StreamBank.bas
"""
import logging
from .Input.LandUse.AreaTotal import AreaTotal_f
from .Input.LandUse.Ag.AGSTRM import AGSTRM_f
from .Input.LandUse.Ag.TileDrain import TileDrain_f
from .Output.AvAnimalNSum.AnimalN import AnimalN_f
from .Output.Loading.StreamBankEros import StreamBankEros_f
from .Output.Loading.StreamBankEros_1 import StreamBankEros_1_f
from .Output.Loading.StreamBankN_1 import StreamBankN_1_f
log = logging.getLogger(__name__)
def CalculateStreamBankEros(z, Y):
# CALCULATE THE STREAM BANK SEDIMENT AND N AND P
for i in range(12):
# CALCULATE ER FACTOR FOR STREAMBANK EROSION
z.StreamBankP[Y][i] = \
StreamBankEros_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef,
z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow,
z.StreamWithdrawal,
z.GroundWithdrawal, z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
z.AvSlope,
z.SedAAdjust, z.StreamLength)[Y][i] * (z.SedPhos / 1000000) * z.BankPFrac
# CALCULATIONS FOR STREAM BANK STABILIZATION AND FENCING
        z.PURBBANK = 0
        z.FCURBBANK = 0
        z.PSTAB = 0
if z.n42b > 0:
z.PSTAB = (z.n46c / z.n42b) * z.StreamBankP[Y][i] * z.n77c
z.PURBBANK = (z.UrbBankStab / z.n42b) * z.StreamBankP[Y][i] * z.n77c
z.PFEN = 0
if z.n42 > 0:
z.PFEN = (z.n45 / z.n42) * z.StreamBankP[Y][i] * AGSTRM_f(z.AgLength, z.StreamLength) * z.n77
z.StreamBankP[Y][i] = z.StreamBankP[Y][i] - (z.PSTAB + z.PFEN + z.PURBBANK)
if z.StreamBankP[Y][i] < 0:
z.StreamBankP[Y][i] = 0
# CALCULATE ANNUAL STREAMBANK N AND P AND SEDIMENT
z.StreamBankPSum[Y] += z.StreamBankP[Y][i]
z.StreamBankErosSum[Y] += \
StreamBankEros_1_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0,
z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef
, z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow,
z.StreamWithdrawal, z.GroundWithdrawal, z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj,
z.SedAFactor_0, z.AvKF, z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.n46c, z.n85d,
z.AgLength, z.n42, z.n45, z.n85, z.UrbBankStab)[Y][i]
# GROUNDWATER N LOADS ARE REDUCED BASED ON SPECIFIC BMPS
z.GWNRF = 0
z.CHNGN1 = 0
z.CHNGN2 = 0
z.CHNGN3 = 0
z.CHNGN4 = 0
z.CHNGN5 = 0
z.CHNGNTOT = 0
z.PCTN1 = 0
z.PCTN2 = 0
z.PCTN3 = 0
z.PCTN4 = 0
z.PCBMPAC = 0
z.HPBMPAC = 0
z.BMPACRES = 0
z.PCTAG = 0
z.RCNMAC = 0
z.HPNMAC = 0
z.CHNGN1 = z.n25 / 100
z.CHNGN2 = z.n26 / 100
z.CHNGN3 = z.n27 / 100
z.CHNGN4 = z.n27b / 100
z.CHNGN5 = z.n28 / 100
z.CHNGNTOT = z.CHNGN1 + z.CHNGN2 + z.CHNGN3 + z.CHNGN4 + z.CHNGN5
if AreaTotal_f(z.Area) > 0 and z.n23 > 0 and z.n42 > 0 and z.n42b > 0:
z.PCTAG = (z.n23 + z.n24) / AreaTotal_f(z.Area)
z.GroundNitr[Y][i] -= z.GroundNitr[Y][i] * ((z.n28b / 100) * z.n23) / z.n23 * z.PCTAG * z.n70
z.GroundNitr[Y][i] -= z.GroundNitr[Y][i] * (z.n43 / z.n42) * (z.n42 / z.n42b) * z.PCTAG * z.n64
z.GroundNitr[Y][i] -= (z.GroundNitr[Y][i] * (
(((z.n29 / 100) * z.n23) + ((z.n37 / 100) * z.n24)) / (z.n23 + z.n24))) * z.PCTAG * z.n68
# Groundwater P loads are reduced based on extent of nutrient management BMP
z.RCNMAC = (z.n28b / 100) * z.n23
z.HPNMAC = (z.n35b / 100) * z.n24
if AreaTotal_f(z.Area) > 0:
z.GroundPhos[Y][i] -= (((z.RCNMAC + z.HPNMAC) / AreaTotal_f(z.Area)) *
z.GroundPhos[Y][i] * z.n78)
z.GroundNitrSum[Y] += z.GroundNitr[Y][i]
z.GroundPhosSum[Y] += z.GroundPhos[Y][i]
z.TileDrainSum[Y] += \
TileDrain_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef, z.Landuse,
z.TileDrainDensity)[Y][i]
z.TileDrainNSum[Y] += z.TileDrainN[Y][i]
z.TileDrainPSum[Y] += z.TileDrainP[Y][i]
z.TileDrainSedSum[Y] += z.TileDrainSed[Y][i]
z.AnimalPSum[Y] += z.AnimalP[Y][i]
z.AnimalFCSum[Y] += z.AnimalFC[Y][i]
z.WWOrgsSum[Y] += z.WWOrgs[Y][i]
z.SSOrgsSum[Y] += z.SSOrgs[Y][i]
z.UrbOrgsSum[Y] += z.UrbOrgs[Y][i]
z.TotalOrgsSum[Y] += z.TotalOrgs[Y][i]
z.WildOrgsSum[Y] += z.WildOrgs[Y][i]
z.GRLostBarnPSum[Y] += z.GRLostBarnP[Y][i]
z.GRLostBarnFCSum[Y] += z.GRLostBarnFC[Y][i]
z.NGLostBarnPSum[Y] += z.NGLostBarnP[Y][i]
z.NGLostBarnFCSum[Y] += z.NGLostBarnFC[Y][i]
z.NGLostManPSum[Y] += z.NGLostManP[Y][i]
z.TotNitr[Y][i] += StreamBankN_1_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef, z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse,
z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal, z.GroundWithdrawal,
z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.AgLength,
z.UrbBankStab, z.SedNitr, z.BankNFrac, z.n69c, z.n45, z.n69, z.n46c, z.n42)[
Y][i] + \
z.TileDrainN[Y][i] + \
AnimalN_f(z.NYrs, z.NGPctManApp, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt,
z.AnimalDailyN, z.NGAppNRate, z.Prec, z.DaysMonth,
z.NGPctSoilIncRate, z.GRPctManApp, z.GRAppNRate, z.GRPctSoilIncRate, z.NGBarnNRate,
z.AWMSNgPct, z.NgAWMSCoeffN,
z.RunContPct, z.RunConCoeffN, z.PctGrazing, z.GRBarnNRate, z.AWMSGrPct,
z.GrAWMSCoeffN, z.PctStreams, z.GrazingNRate)[Y][i]
z.TotPhos[Y][i] += z.StreamBankP[Y][i] + z.TileDrainP[Y][i] + z.AnimalP[Y][i]
z.TotNitrSum[Y] += StreamBankN_1_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef, z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse,
z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal, z.GroundWithdrawal,
z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.AgLength,
z.UrbBankStab, z.SedNitr, z.BankNFrac, z.n69c, z.n45, z.n69, z.n46c, z.n42)[
Y][i] + \
z.TileDrainN[Y][i] + \
AnimalN_f(z.NYrs, z.NGPctManApp, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt,
z.AnimalDailyN, z.NGAppNRate, z.Prec, z.DaysMonth,
z.NGPctSoilIncRate, z.GRPctManApp, z.GRAppNRate, z.GRPctSoilIncRate, z.NGBarnNRate,
z.AWMSNgPct, z.NgAWMSCoeffN,
z.RunContPct, z.RunConCoeffN, z.PctGrazing, z.GRBarnNRate, z.AWMSGrPct,
z.GrAWMSCoeffN, z.PctStreams, z.GrazingNRate)[Y][i]
z.TotPhosSum[Y] += z.StreamBankP[Y][i] + z.TileDrainP[Y][i] + z.AnimalP[Y][i]
| 55.095238 | 120 | 0.529927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 389 | 0.042027 |
# === AutoGraphModel.py (shiqitao/AutoGraph, MIT) ===
import argparse
import os
import time
import torch
from filelock import FileLock
from ModelAPPNP import main_model_appnp
from ModelAPPNP2 import main_model_appnp as main_model_appnp_2
from ModelAPPNP3 import main_model_appnp as main_model_appnp_3
from ModelAPPNP4 import main_model_appnp as main_model_appnp_4
from ModelGAT import main_model_gat
from ModelGAT2 import main_model_gat as main_model_gat_2
from ModelGAT3 import main_model_gat as main_model_gat_3
from ModelGAT4 import main_model_gat as main_model_gat_4
from ModelGCN import main_model_gcn
from ModelGCN2 import main_model_gcn as main_model_gcn_2
from ModelGCN3 import main_model_gcn as main_model_gcn_3
from ModelGCN4 import main_model_gcn as main_model_gcn_4
from ModelGCNOld import main_model_gcn_old
from Result import Result
from tools import save_data, load_data, file_path
if __name__ == "__main__":
start_time = time.time()
time_budget = float("inf")
parser = argparse.ArgumentParser()
parser.add_argument("--index", type=int)
parser.add_argument("--file_param", type=str)
parser.add_argument("--file_ready", type=str)
parser.add_argument("--file_result", type=str)
parser.add_argument("--file_lock", type=str)
parser.add_argument("--if_kill", type=int)
args = parser.parse_args()
if torch.cuda.is_available():
torch.zeros(1).cuda()
with FileLock(args.file_lock):
save_data(args.file_ready, os.getpid())
aoe_data = None
while True:
if aoe_data is None and os.path.exists(file_path("AOE.data")):
with FileLock(file_path("AOE.ready")):
aoe_data = load_data(file_path("AOE.data"))
if os.path.exists(args.file_param):
if aoe_data is None and os.path.exists(file_path("AOE.data")):
with FileLock(file_path("AOE.ready")):
aoe_data = load_data(file_path("AOE.data"))
            start_time = time.time()  # reset the start time
with FileLock(args.file_lock):
param = load_data(args.file_param)
if param.time_budget is not None:
                time_budget = param.time_budget  # reset the time budget
try:
if param.model == "ModelGCNOld":
result = main_model_gcn_old(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
                        # must keep this order: save -> remove
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelGAT":
result = main_model_gat(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
                        # must keep this order: save -> remove
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelGCN":
result = main_model_gcn(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelAPPNP":
result = main_model_appnp(
data=aoe_data,
K=param.param[0],
alpha=param.param[1],
hidden=param.param[2],
activation=param.param[3],
if_all=True
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelGAT2":
result = main_model_gat_2(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
                        # must keep this order: save -> remove
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelGCN2":
result = main_model_gcn_2(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelAPPNP2":
result = main_model_appnp_2(
data=aoe_data,
K=param.param[0],
alpha=param.param[1],
hidden=param.param[2],
activation=param.param[3],
if_all=True
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelGAT3":
result = main_model_gat_3(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
                        # must keep this order: save -> remove
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelGCN3":
result = main_model_gcn_3(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelAPPNP3":
result = main_model_appnp_3(
data=aoe_data,
K=param.param[0],
alpha=param.param[1],
hidden=param.param[2],
activation=param.param[3],
if_all=True
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelGAT4":
result = main_model_gat_4(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
                        # must keep this order: save -> remove
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelGCN4":
result = main_model_gcn_4(
data=aoe_data,
num_layers=param.param[0],
hidden_list=param.param[1],
activation=param.param[2],
if_all=True
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
elif param.model == "ModelAPPNP4":
result = main_model_appnp_4(
data=aoe_data,
K=param.param[0],
alpha=param.param[1],
hidden=param.param[2],
activation=param.param[3],
if_all=True
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
else:
                    raise ValueError("Model name error: {0}".format(param.model))
except RuntimeError:
if args.if_kill == 1:
break
else:
result = Result(
result=None,
loss_train=None,
loss_valid=None,
acc_train=None,
acc_valid=None,
epoch=None,
)
with FileLock(args.file_lock):
save_data(args.file_result, result)
os.remove(args.file_param)
if time.time() - start_time >= time_budget:
break
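# Refactor sketch (hypothetical): the long if/elif chain above could shrink to a dispatch
# table keyed by model name, provided the APPNP-style four-parameter calls are normalized:
# RUNNERS = {"ModelGCN": main_model_gcn, "ModelGAT": main_model_gat, "ModelAPPNP": main_model_appnp}
# result = RUNNERS[param.model](data=aoe_data, ...)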
# === green_snake/files.py (tetio/green_snake, BSD-2-Clause) ===
import os
# import sys
# from shutil import copyfile
# import time
def getFilesFromFolder(path):
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk("./resources/"+path):
for file in f:
if not file.startswith(".") and file.endswith(".msg"): # ".msg" in file:
files.append(os.path.join(r, file))
return files
def loadMessage(token, company_code, file_path):
data = file_path.split("/")[-1].split(".")
contents = ""
df = open(file_path, "r")
for l in df:
contents += l
message = {
"documentType": data[0],
"sender": data[1],
"receiver": data[2],
"msgNumber": data[3],
"numVersion": data[4],
"messageFormat": data[5],
"signed": data[6],
"companyCode": company_code,
"securityToken": token,
"path": file_path
}
return message
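# Filename convention inferred from the index positions above:
#   <documentType>.<sender>.<receiver>.<msgNumber>.<numVersion>.<messageFormat>.<signed>.msg
# e.g. loadMessage(token, "ACME", "./resources/in/IFTMIN.SND1.RCV9.0001.D96A.EDI.0.msg")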
# === feedzero/users/urls.py (dammitjim/badfeed, Apache-2.0) ===
from django.urls import include, path
from feedzero.users.views import LogoutView

app_name = "users"

urlpatterns = [
path("logout/", LogoutView.as_view(), name="logout"),
path("integrations/pocket/", include("feedzero.users.integrations.pocket.urls")),
]
# === allies/serializers.py (PaulLerner/Allies, MIT) ===
import numpy as np
import pickle
import struct


class Serializer:
def serialize(self, model):
"""
Serialize a model dict to a string of uint8 bytes
:param model: a dict with model components
:return: a np.array of uint8
"""
raise NotImplementedError
def deserialize(self, model):
"""
Deserialize a model from a string of uint8 bytes
:param model: a np.array of uint8
:return: a dict with model components
"""
raise NotImplementedError


class DummySerializer(Serializer):
"""
Dummy serializer for debug purposes.
We assume inputs in the model dict are just paths to pretrained models
"""
def serialize(self, model):
pkl = pickle.dumps(model)
u8 = np.array(struct.unpack("{}B".format(len(pkl)), pkl), dtype=np.uint8)
return u8
def deserialize(self, model):
pkl_after = struct.pack('{}B'.format(len(model)), *list(model))
serialized_model = pickle.loads(pkl_after)
        return serialized_model
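# Round-trip sketch (the path below is a made-up placeholder):
# s = DummySerializer()
# blob = s.serialize({"encoder": "/models/encoder.pt"})  # -> np.uint8 array
# s.deserialize(blob)  # -> {"encoder": "/models/encoder.pt"}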
# === examples/example_1.py (alfredocarella/code-for-the-world, MIT) ===
# -*- coding: utf-8 -*-
"""Example 1: Load and plot airfoil coordinates
"""
import os
import matplotlib.pyplot as plt
from mypack.utils.io import read_selig
from mypack.utils.plotting import plot_airfoil


def example_1():
"""Run example 1"""
# script inputs
mod_path = os.path.dirname(os.path.abspath(__file__)) # current module
air_path = os.path.join(mod_path, '..',
'tests', 'test_utils', 'files', 'demo_selig.dat')
    # load coordinates from a selig-style airfoil file
air_df = read_selig(air_path)
# plot the airfoil
plot_airfoil(air_df)
# save the png for the documentation
fig = plt.gcf()
save_name = os.path.basename(__file__).replace('.py', '.png') # file name
save_path = os.path.join(mod_path, save_name)
fig.savefig(save_path)


if __name__ == '__main__':  # call function when run as script
example_1()
# === tests/admin.py (theskumar/django-versatileimagefield, MIT) ===
from django.contrib import admin
from django.forms import ModelForm
from versatileimagefield.widgets import VersatileImagePPOISelectWidget
from .models import VersatileImageTestModel, VersatileImageWidgetTestModel


class VersatileImageTestModelForm(ModelForm):
class Meta:
model = VersatileImageTestModel
fields = (
'image',
'img_type',
'optional_image',
'optional_image_2',
'optional_image_3'
)
widgets = {
'optional_image': VersatileImagePPOISelectWidget(),
}


class VersatileImageTestModelAdmin(admin.ModelAdmin):
    form = VersatileImageTestModelForm


admin.site.register(VersatileImageTestModel, VersatileImageTestModelAdmin)
admin.site.register(VersatileImageWidgetTestModel)
# === alembic/versions/9cae282981fd_waze_object_ended_at.py (shaysw/anyway, MIT) ===
"""waze object ended at
Revision ID: 9cae282981fd
Revises: cac481832078
Create Date: 2020-10-20 18:34:54.574378
"""
# revision identifiers, used by Alembic.
revision = '9cae282981fd'
down_revision = 'cac481832078'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('waze_alerts', sa.Column('ended_at_estimate', sa.DateTime(), nullable=True))
op.create_index(op.f('ix_waze_alerts_ended_at_estimate'), 'waze_alerts', ['ended_at_estimate'], unique=False)
op.add_column('waze_traffic_jams', sa.Column('ended_at_estimate', sa.DateTime(), nullable=True))
op.create_index(op.f('ix_waze_traffic_jams_ended_at_estimate'), 'waze_traffic_jams', ['ended_at_estimate'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_waze_traffic_jams_ended_at_estimate'), table_name='waze_traffic_jams')
op.drop_column('waze_traffic_jams', 'ended_at_estimate')
op.drop_index(op.f('ix_waze_alerts_ended_at_estimate'), table_name='waze_alerts')
op.drop_column('waze_alerts', 'ended_at_estimate')
# ### end Alembic commands ###
# === cohesity_management_sdk/models/object_status_enum.py (nick6655/management-sdk-python, Apache-2.0) ===
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class ObjectStatusEnum(object):
"""Implementation of the 'ObjectStatus' enum.
Specifies the status of an object during a Restore Task.
'kFilesCloned' indicates that the cloning has completed.
'kFetchedEntityInfo' indicates that information about the object was
fetched from the primary source.
'kVMCreated' indicates that the new VM was created.
'kRelocationStarted' indicates that restoring to a different
resource pool has started.
'kFinished' indicates that the Restore Task has finished.
Whether it was successful or not is indicated by the error code that
that is stored with the Restore Task.
'kAborted' indicates that the Restore Task was aborted before
trying to restore this object. This can happen if the
Restore Task encounters a global error.
For example during a 'kCloneVMs' Restore Task, the datastore
could not be mounted. The entire Restore Task is aborted
before trying to create VMs on the primary source.
'kDataCopyStarted' indicates that the disk copy is started.
'kInProgress' captures a generic in-progress state and can be used by
restore
operations that don't track individual states.
Attributes:
KFILESCLONED: TODO: type description here.
KFETCHEDENTITYINFO: TODO: type description here.
KVMCREATED: TODO: type description here.
KRELOCATIONSTARTED: TODO: type description here.
KFINISHED: TODO: type description here.
KABORTED: TODO: type description here.
KDATACOPYSTARTED: TODO: type description here.
KINPROGRESS: TODO: type description here.
"""
KFILESCLONED = 'kFilesCloned'
KFETCHEDENTITYINFO = 'kFetchedEntityInfo'
KVMCREATED = 'kVMCreated'
KRELOCATIONSTARTED = 'kRelocationStarted'
KFINISHED = 'kFinished'
KABORTED = 'kAborted'
KDATACOPYSTARTED = 'kDataCopyStarted'
KINPROGRESS = 'kInProgress'
# === apps/backend/components/collections/base.py (ZhuoZhuoCrayon/bk-nodeman, MIT) ===
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import operator
import traceback
from functools import reduce, wraps
from typing import Dict, List, Set, Union
from django.db.models import F, Q, Value
from django.db.models.functions import Concat
from django.utils import timezone
from django.utils.translation import ugettext as _
from apps.backend.subscription import errors
from apps.backend.subscription.tools import create_group_id
from apps.node_man import constants, models
from apps.utils.time_handler import strftime_local
from common.log import logger
from pipeline.core.flow import Service


class ActivityType:
HEAD = 0
TAIL = 1
HEAD_TAIL = 2


class LogLevel:
INFO = 0
WARNING = 1
ERROR = 2
DEBUG = 3
LEVEL_PREFIX_MAP = {INFO: "INFO", WARNING: "WARNING", ERROR: "ERROR", DEBUG: "DEBUG"}


class LogMaker:
@staticmethod
def get_log_content(level: int, content: str) -> str:
return (
f"[{strftime_local(timezone.now())} "
f"{LogLevel.LEVEL_PREFIX_MAP.get(level, LogLevel.LEVEL_PREFIX_MAP[LogLevel.INFO])}] {content}"
)
def error_log(self, content: str) -> str:
return self.get_log_content(LogLevel.ERROR, content)
def info_log(self, content: str):
return self.get_log_content(LogLevel.INFO, content)
def warning_log(self, content: str):
return self.get_log_content(LogLevel.WARNING, content)
def debug_log(self, content: str):
return self.get_log_content(LogLevel.DEBUG, content)
def exception_handler(service_func):
    # Outermost exception fallback wrapping the atom execution logic
@wraps(service_func)
def wrapper(self, data, parent_data, *args, **kwargs):
act_name = data.get_one_of_inputs("act_name")
sub_inst_ids = self.get_subscription_instance_ids(data)
try:
return service_func(self, data, parent_data, *args, **kwargs)
except Exception as error:
            error_msg = _("{act_name} failed: {err}. Please check the error log first; if the problem persists, contact an administrator").format(
act_name=act_name, err=str(error), msg=traceback.format_exc()
)
logger.exception(error_msg)
            # Try to update the subscription instance statuses
self.bulk_set_sub_inst_act_status(
sub_inst_ids=sub_inst_ids,
status=constants.JobStatusType.FAILED,
common_log=self.log_maker.error_log(error_msg),
)
            # Fold the traceback into a collapsible debug log block
self.log_debug(
sub_inst_ids=sub_inst_ids,
log_content="{debug_begin}\n{traceback}\n{debug_end}".format(
debug_begin=" Begin of collected logs: ".center(40, "*"),
traceback=traceback.format_exc(),
debug_end=" End of collected logs ".center(40, "*"),
),
)
            # NOTE: `self.schedule` is the decorated wrapper, so compare against
            # the wrapped original (set by functools.wraps) to detect schedule()
            if getattr(self.schedule, "__wrapped__", None) is service_func:
self.finish_schedule()
return False
return wrapper
class LogMixin:
log_maker_class = LogMaker
def get_log_maker(self):
return self.log_maker_class()
def log_base(
        self, sub_inst_ids: Union[int, List[int], None] = None, log_content: str = None, level: int = LogLevel.INFO
):
"""
        Write a log entry for the matching subscription instance status details.
        :param sub_inst_ids: a single instance ID, a list of IDs, or None for all
        :param log_content: text to append to the log
        :param level: one of the LogLevel constants
        :return: None
"""
filters = {"node_id": self.id}
if sub_inst_ids is None:
pass
elif isinstance(sub_inst_ids, int):
filters["subscription_instance_record_id"] = sub_inst_ids
else:
filters["subscription_instance_record_id__in"] = sub_inst_ids
models.SubscriptionInstanceStatusDetail.objects.filter(**filters).update(
log=Concat("log", Value(f"\n{self.log_maker.get_log_content(level, log_content)}")),
update_time=timezone.now(),
)
    def log_info(self, sub_inst_ids: Union[int, List[int], None] = None, log_content: str = None):
        self.log_base(sub_inst_ids, log_content, level=LogLevel.INFO)
    def log_warning(self, sub_inst_ids: Union[int, List[int], None] = None, log_content: str = None):
        self.log_base(sub_inst_ids, log_content, level=LogLevel.WARNING)
    def log_error(self, sub_inst_ids: Union[int, List[int], None] = None, log_content: str = None):
        self.log_base(sub_inst_ids, log_content, level=LogLevel.ERROR)
    def log_debug(self, sub_inst_ids: Union[int, List[int], None] = None, log_content: str = None):
        self.log_base(sub_inst_ids, log_content, level=LogLevel.DEBUG)
class CommonData:
"""
    A common data structure abstracted out for data frequently used during atom
    execution, to avoid writing the same boilerplate repeatedly.
    Type-annotating the instance attributes here also avoids re-declaring them
    at every call site and speeds up development.
"""
def __init__(
self,
bk_host_ids: Set[int],
process_statuses: List[models.ProcessStatus],
host_id_obj_map: Dict[int, models.Host],
target_host_objs: List[models.Host],
ap_id_obj_map: Dict[int, models.AccessPoint],
subscription: models.Subscription,
policy_step_adapter,
group_id_instance_map: Dict[str, models.SubscriptionInstanceRecord],
subscription_instances: List[models.SubscriptionInstanceRecord],
subscription_instance_ids: Set[int],
):
from apps.backend.subscription.steps.adapter import PolicyStepAdapter
self.bk_host_ids = bk_host_ids
self.process_statuses = process_statuses
self.host_id_obj_map = host_id_obj_map
self.target_host_objs = target_host_objs
self.ap_id_obj_map = ap_id_obj_map
self.subscription = subscription
self.policy_step_adapter: PolicyStepAdapter = policy_step_adapter
self.group_id_instance_map = group_id_instance_map
self.plugin_name = policy_step_adapter.plugin_name
self.subscription_instances = subscription_instances
self.subscription_instance_ids = subscription_instance_ids
class BaseService(Service, LogMixin):
def __init__(self, *args, **kwargs):
self.failed_subscription_instance_id_reason_map: Dict = {}
self.log_maker = self.get_log_maker()
super().__init__(*args, **kwargs)
def move_insts_to_failed(self, sub_inst_ids: Union[List[int], Set[int]], log_content: str = None):
"""
        Move the instances into failed_subscription_instance_id_reason_map, so
        that subclass atoms can drop the failed instances from further steps.
        :param sub_inst_ids: list/set of subscription instance IDs
        :param log_content: error log content
"""
for inst_id in sub_inst_ids:
self.failed_subscription_instance_id_reason_map[inst_id] = log_content
if log_content:
self.log_error(sub_inst_ids=sub_inst_ids, log_content=log_content)
def sub_inst_failed_handler(self, sub_inst_ids: Union[List[int], Set[int]]):
"""
        Failure handler for subscription instances: records a log entry and
        increments the retry counter.
        :param sub_inst_ids: list/set of subscription instance IDs
"""
instance_record_objs = list(models.SubscriptionInstanceRecord.objects.filter(id__in=sub_inst_ids))
        # Instances in the same batch come from the same subscription
subscription = models.Subscription.get_subscription(instance_record_objs[0].subscription_id, show_deleted=True)
group_ids = [
create_group_id(subscription, inst_record_obj.instance_info) for inst_record_obj in instance_record_objs
]
models.ProcessStatus.objects.filter(source_id=subscription.id, group_id__in=group_ids).update(
retry_times=F("retry_times") + 1
)
        logger.info(
            f"subscription_id -> [{subscription.id}], subscription_instance_ids -> {sub_inst_ids}, "
            f"act_id -> {self.id}: plugin deployment failed, retry count +1"
        )
        self.log_warning(sub_inst_ids=sub_inst_ids, log_content=_("Plugin deployment failed, retry count +1"))
    def bulk_set_sub_inst_status(self, status: str, sub_inst_ids: Union[List[int], Set[int]]):
        """Bulk-set instance statuses. Instance- and atom-level status updates
        should only happen inside this base class."""
models.SubscriptionInstanceRecord.objects.filter(id__in=sub_inst_ids).update(
status=status, update_time=timezone.now()
)
if status in [constants.JobStatusType.FAILED]:
self.sub_inst_failed_handler(sub_inst_ids)
def bulk_set_sub_inst_act_status(
self, sub_inst_ids: Union[List[int], Set[int]], status: str, common_log: str = None
):
"""
        Bulk-set the per-activity instance statuses.
        :param sub_inst_ids: subscription instance IDs
        :param status: target status
        :param common_log: shared log content, for errors that must be surfaced globally
        :return: None
"""
if not sub_inst_ids:
return
update_fields = {"status": status}
if common_log:
update_fields["log"] = Concat("log", Value(f"\n{common_log}"))
models.SubscriptionInstanceStatusDetail.objects.filter(
subscription_instance_record_id__in=sub_inst_ids, node_id=self.id
).update(**{**update_fields, "update_time": timezone.now()})
        # Failed instances also need their aggregate status updated
if status in [constants.JobStatusType.FAILED]:
self.bulk_set_sub_inst_status(constants.JobStatusType.FAILED, sub_inst_ids)
@staticmethod
def get_subscription_instance_ids(data):
subscription_instance_ids = data.get_one_of_inputs("subscription_instance_ids")
        # Prefer the subscription instance IDs that succeeded in the previous node
succeeded_subscription_instance_ids = data.get_one_of_inputs("succeeded_subscription_instance_ids")
# "${" 代表该变量未被渲染,直接取 subscription_instance_ids 作为要执行的实例
if succeeded_subscription_instance_ids and "${" not in succeeded_subscription_instance_ids:
subscription_instance_ids = succeeded_subscription_instance_ids
return subscription_instance_ids
@classmethod
def get_common_data(cls, data):
"""
        Initialize commonly used data. Note that this data must not be stored on
        self, or it would inflate the process snapshot; also avoid using it in
        schedule, where repeated callbacks could cause performance issues.
"""
from apps.backend.subscription.steps.adapter import PolicyStepAdapter
subscription_instance_ids = BaseService.get_subscription_instance_ids(data)
subscription_instances = list(
models.SubscriptionInstanceRecord.objects.filter(id__in=subscription_instance_ids)
)
        # Tasks executed in the same batch all originate from the same subscription task
subscription = models.Subscription.get_subscription(
subscription_instances[0].subscription_id, show_deleted=True
)
subscription_step_id = data.get_one_of_inputs("subscription_step_id")
try:
subscription_step = models.SubscriptionStep.objects.get(id=subscription_step_id)
except models.SubscriptionStep.DoesNotExist:
raise errors.SubscriptionStepNotExist({"step_id": subscription_step_id})
bk_host_ids = set()
subscription_instance_ids = set()
group_id_instance_map: Dict[str, models.SubscriptionInstanceRecord] = {}
for subscription_instance in subscription_instances:
bk_host_ids.add(subscription_instance.instance_info["host"]["bk_host_id"])
group_id = create_group_id(subscription, subscription_instance.instance_info)
group_id_instance_map[group_id] = subscription_instance
subscription_instance_ids.add(subscription_instance.id)
target_host_objs = None
if subscription.target_hosts:
            # Target hosts, used for remote-collection scenarios
query_conditions = reduce(
operator.or_,
[
Q(inner_ip=target_host["ip"], bk_cloud_id=target_host["bk_cloud_id"])
for target_host in subscription.target_hosts
],
)
target_host_objs = models.Host.objects.filter(query_conditions)
for host in target_host_objs:
bk_host_ids.add(host.bk_host_id)
policy_step_adapter = PolicyStepAdapter(subscription_step)
host_id_obj_map: Dict[int, models.Host] = models.Host.host_id_obj_map(bk_host_id__in=bk_host_ids)
ap_id_obj_map = models.AccessPoint.ap_id_obj_map()
process_statuses = models.ProcessStatus.objects.filter(
name=policy_step_adapter.plugin_name, group_id__in=group_id_instance_map.keys()
)
return CommonData(
bk_host_ids,
process_statuses,
host_id_obj_map,
target_host_objs,
ap_id_obj_map,
subscription,
policy_step_adapter,
group_id_instance_map,
subscription_instances,
subscription_instance_ids,
)
def set_current_id(self, subscription_instance_ids: List[int]):
        # Update the pipeline id of the current instances
models.SubscriptionInstanceRecord.objects.filter(id__in=subscription_instance_ids).update(pipeline_id=self.id)
def set_outputs_data(self, data, common_data: CommonData) -> bool:
data.outputs.succeeded_subscription_instance_ids = [
sub_inst_id
for sub_inst_id in common_data.subscription_instance_ids
if sub_inst_id not in self.failed_subscription_instance_id_reason_map.keys()
]
        # As long as any instance succeeded, the flow can continue
return bool(data.outputs.succeeded_subscription_instance_ids)
def _execute(self, data, parent_data, common_data: CommonData):
raise NotImplementedError
def _schedule(self, data, parent_data, callback_data=None):
pass
def run(self, service_func, data, parent_data, **kwargs) -> bool:
subscription_instance_ids = BaseService.get_subscription_instance_ids(data)
act_name = data.get_one_of_inputs("act_name")
act_type = data.get_one_of_inputs("act_type")
        # At the start of the flow, set RUNNING
if service_func == self._execute and act_type in [ActivityType.HEAD, ActivityType.HEAD_TAIL]:
self.bulk_set_sub_inst_status(constants.JobStatusType.RUNNING, subscription_instance_ids)
service_func(data, parent_data, **kwargs)
failed_subscription_instance_id_set = set(self.failed_subscription_instance_id_reason_map.keys())
succeeded_subscription_instance_id_set = set(subscription_instance_ids) - failed_subscription_instance_id_set
        # Handle early termination (instances revoked elsewhere)
revoked_subscription_instance_ids = list(
models.SubscriptionInstanceRecord.objects.filter(
id__in=succeeded_subscription_instance_id_set, status=constants.JobStatusType.FAILED
).values_list("id", flat=True)
)
        # Update the statuses of succeeded / failed instances
succeeded_subscription_instance_ids = list(
succeeded_subscription_instance_id_set - set(revoked_subscription_instance_ids)
)
self.bulk_set_sub_inst_act_status(
sub_inst_ids=revoked_subscription_instance_ids,
status=constants.JobStatusType.FAILED,
common_log=self.log_maker.warning_log(
                _("{act_name} was terminated; retry the whole task or retry this step. (details: {revoke_sub_inst_id_set})").format(
act_name=act_name, revoke_sub_inst_id_set=revoked_subscription_instance_ids
)
),
)
data.outputs.succeeded_subscription_instance_ids = succeeded_subscription_instance_ids
self.bulk_set_sub_inst_act_status(
sub_inst_ids=failed_subscription_instance_id_set,
status=constants.JobStatusType.FAILED,
common_log=self.log_maker.error_log(
            _("{act_name} failed. Please check the log first; if the problem persists, contact an administrator.").format(act_name=act_name)
),
)
        # The schedule logic still needs to run
if self.need_schedule() and not self.is_schedule_finished():
return bool(succeeded_subscription_instance_ids)
self.bulk_set_sub_inst_act_status(
sub_inst_ids=succeeded_subscription_instance_ids,
status=constants.JobStatusType.SUCCESS,
            common_log=self.log_maker.info_log(_("{act_name} succeeded").format(act_name=act_name)),
)
        # At the end of the flow, mark the succeeded instances
if act_type in [ActivityType.TAIL, ActivityType.HEAD_TAIL]:
self.bulk_set_sub_inst_status(
constants.JobStatusType.SUCCESS, sub_inst_ids=succeeded_subscription_instance_ids
)
return bool(succeeded_subscription_instance_ids)
@exception_handler
def execute(self, data, parent_data):
common_data = self.get_common_data(data)
act_name = data.get_one_of_inputs("act_name")
subscription_instance_ids = self.get_subscription_instance_ids(data)
to_be_created_sub_statuses = [
models.SubscriptionInstanceStatusDetail(
subscription_instance_record_id=sub_inst_id,
node_id=self.id,
status=constants.JobStatusType.RUNNING,
                log=self.log_maker.info_log(_("Start {act_name}.").format(act_name=act_name)),
)
for sub_inst_id in subscription_instance_ids
]
models.SubscriptionInstanceStatusDetail.objects.bulk_create(to_be_created_sub_statuses)
self.set_current_id(subscription_instance_ids)
return self.run(self._execute, data, parent_data, common_data=common_data)
@exception_handler
def schedule(self, data, parent_data, callback_data=None):
return self.run(self._schedule, data, parent_data, callback_data=callback_data)
def inputs_format(self):
return [
Service.InputItem(
name="subscription_instance_ids", key="subscription_instance_ids", type="list", required=True
),
Service.InputItem(name="subscription_step_id", key="subscription_step_id", type="int", required=True),
]
def outputs_format(self):
return [
Service.OutputItem(
name="succeeded_subscription_instance_ids",
key="succeeded_subscription_instance_ids",
type="list",
required=True,
)
]
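# --- Illustrative subclass sketch (hypothetical; not part of bk-nodeman) ---
# A concrete atom only needs to override _execute (and optionally _schedule);
# the status transitions, logging and failure bookkeeping above are inherited
# from BaseService:
# class NoopService(BaseService):
#     def _execute(self, data, parent_data, common_data: CommonData):
#         self.log_info(
#             sub_inst_ids=list(common_data.subscription_instance_ids),
#             log_content="noop step executed",
#         )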
| 40.878652 | 119 | 0.678522 | 16,479 | 0.853613 | 0 | 0 | 6,360 | 0.329448 | 0 | 0 | 4,063 | 0.210464 |
0fdf08782d0c6126eba111656026fe30af178bba | 389 | py | Python | warpworks/views/index.py | storborg/warpworks | a41a0a5bab8b826157309f7d0bafbdcdff66505b | [
"MIT"
] | null | null | null | warpworks/views/index.py | storborg/warpworks | a41a0a5bab8b826157309f7d0bafbdcdff66505b | [
"MIT"
] | null | null | null | warpworks/views/index.py | storborg/warpworks | a41a0a5bab8b826157309f7d0bafbdcdff66505b | [
"MIT"
] | null | null | null | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from pyramid.view import view_config
from pyweaving.generators.twill import twill
@view_config(route_name='index', renderer='index.html')
def index_view(request):
draft = twill(2, warp_color=(200, 0, 0), weft_color=(90, 90, 90))
return dict(draft_json=draft.to_json())
| 32.416667 | 69 | 0.722365 | 0 | 0 | 0 | 0 | 194 | 0.498715 | 0 | 0 | 19 | 0.048843 |
0fdf761292ed2657c4ca262e9905100c59129c2e | 3,291 | py | Python | linux_odp/.waf-1.6.8-3e3391c5f23fbabad81e6d17c63a1b1e/waflib/Tools/cs.py | dproc/trex_odp_porting_integration | 84d5f27a7eab8186b68c5a2b1409d3d0f41f859b | [
"Apache-2.0"
] | null | null | null | linux_odp/.waf-1.6.8-3e3391c5f23fbabad81e6d17c63a1b1e/waflib/Tools/cs.py | dproc/trex_odp_porting_integration | 84d5f27a7eab8186b68c5a2b1409d3d0f41f859b | [
"Apache-2.0"
] | null | null | null | linux_odp/.waf-1.6.8-3e3391c5f23fbabad81e6d17c63a1b1e/waflib/Tools/cs.py | dproc/trex_odp_porting_integration | 84d5f27a7eab8186b68c5a2b1409d3d0f41f859b | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
from waflib import Utils,Task,Options,Logs,Errors
from waflib.TaskGen import before_method,after_method,feature
from waflib.Tools import ccroot
from waflib.Configure import conf
ccroot.USELIB_VARS['cs']=set(['CSFLAGS','ASSEMBLIES','RESOURCES'])
ccroot.lib_patterns['csshlib']=['%s']
def apply_cs(self):
cs_nodes=[]
no_nodes=[]
for x in self.to_nodes(self.source):
if x.name.endswith('.cs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source=no_nodes
bintype=getattr(self,'type',self.gen.endswith('.dll')and'library'or'exe')
self.cs_task=tsk=self.create_task('mcs',cs_nodes,self.path.find_or_declare(self.gen))
tsk.env.CSTYPE='/target:%s'%bintype
tsk.env.OUT='/out:%s'%tsk.outputs[0].abspath()
inst_to=getattr(self,'install_path',bintype=='exe'and'${BINDIR}'or'${LIBDIR}')
if inst_to:
mod=getattr(self,'chmod',bintype=='exe'and Utils.O755 or Utils.O644)
self.install_task=self.bld.install_files(inst_to,self.cs_task.outputs[:],env=self.env,chmod=mod)
def use_cs(self):
names=self.to_list(getattr(self,'use',[]))
get=self.bld.get_tgen_by_name
for x in names:
try:
y=get(x)
except Errors.WafError:
self.cs_task.env.append_value('CSFLAGS','/reference:%s'%x)
continue
y.post()
tsk=getattr(y,'cs_task',None)or getattr(y,'link_task',None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r'%self)
self.cs_task.dep_nodes.extend(tsk.outputs)
self.cs_task.set_run_after(tsk)
self.cs_task.env.append_value('CSFLAGS','/reference:%s'%tsk.outputs[0].abspath())
def debug_cs(self):
csdebug=getattr(self,'csdebug',self.env.CSDEBUG)
if not csdebug:
return
node=self.cs_task.outputs[0]
if self.env.CS_NAME=='mono':
out=node.parent.find_or_declare(node.name+'.mdb')
else:
out=node.change_ext('.pdb')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
if csdebug=='pdbonly':
val=['/debug+','/debug:pdbonly']
elif csdebug=='full':
val=['/debug+','/debug:full']
else:
val=['/debug-']
self.cs_task.env.append_value('CSFLAGS',val)
class mcs(Task.Task):
color='YELLOW'
run_str='${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def configure(conf):
csc=getattr(Options.options,'cscbinary',None)
if csc:
conf.env.MCS=csc
conf.find_program(['csc','mcs','gmcs'],var='MCS')
conf.env.ASS_ST='/r:%s'
conf.env.RES_ST='/resource:%s'
conf.env.CS_NAME='csc'
if str(conf.env.MCS).lower().find('mcs')>-1:
conf.env.CS_NAME='mono'
def options(opt):
opt.add_option('--with-csc-binary',type='string',dest='cscbinary')
class fake_csshlib(Task.Task):
color='YELLOW'
inst_to=None
def runnable_status(self):
for x in self.outputs:
x.sig=Utils.h_file(x.abspath())
return Task.SKIP_ME
def read_csshlib(self,name,paths=[]):
return self(name=name,features='fake_lib',lib_paths=paths,lib_type='csshlib')
feature('cs')(apply_cs)
before_method('process_source')(apply_cs)
feature('cs')(use_cs)
after_method('apply_cs')(use_cs)
feature('cs')(debug_cs)
after_method('apply_cs','use_cs')(debug_cs)
conf(read_csshlib) | 33.581633 | 102 | 0.726831 | 301 | 0.091462 | 0 | 0 | 0 | 0 | 0 | 0 | 799 | 0.242783 |
0fdf80104584efaaae23ea389313ea17858895b0 | 1,806 | py | Python | fbpmp/utils/abstract_file_ctx.py | benliugithub/fbpcs | 7af984264428058645847135026d474d7e28144e | [
"MIT"
] | null | null | null | fbpmp/utils/abstract_file_ctx.py | benliugithub/fbpcs | 7af984264428058645847135026d474d7e28144e | [
"MIT"
] | null | null | null | fbpmp/utils/abstract_file_ctx.py | benliugithub/fbpcs | 7af984264428058645847135026d474d7e28144e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import pathlib
from fbpcp.service.storage_s3 import S3StorageService
from fbpmp.utils.buffered_s3_file_handler import BufferedS3Reader, BufferedS3Writer
S3_PATH_DRIVE = "https:"
def abstract_file_reader_path(path: pathlib.Path) -> pathlib.Path:
if path.parts[0].lower() == S3_PATH_DRIVE:
region = os.environ.get("PL_AWS_REGION")
key_id = os.environ.get("PL_AWS_KEY_ID")
key_data = os.environ.get("PL_AWS_KEY_DATA")
if region:
storage_service = S3StorageService(
region=region, access_key_id=key_id, access_key_data=key_data
)
else:
storage_service = S3StorageService(
access_key_id=key_id, access_key_data=key_data
)
with BufferedS3Reader(path, storage_service) as reader:
return reader.copy_to_local()
else:
return pathlib.Path(path)
def abstract_file_writer_ctx(path: pathlib.Path) -> contextlib.AbstractContextManager:
if path.parts[0].lower() == S3_PATH_DRIVE:
region = os.environ.get("PL_AWS_REGION")
key_id = os.environ.get("PL_AWS_KEY_ID")
key_data = os.environ.get("PL_AWS_KEY_DATA")
if region:
storage_service = S3StorageService(
region=region, access_key_id=key_id, access_key_data=key_data
)
else:
storage_service = S3StorageService(
access_key_id=key_id, access_key_data=key_data
)
return BufferedS3Writer(path, storage_service)
else:
return open(path, "w")
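# Minimal usage sketch: write through the context manager, then read the path
# back. The filename below is hypothetical and only the local branch is
# exercised (the S3 branch needs PL_AWS_* env vars and a real bucket).
if __name__ == "__main__":
    demo = pathlib.Path("/tmp/abstract_file_ctx_demo.txt")
    with abstract_file_writer_ctx(demo) as handle:
        handle.write("hello\n")
    print(abstract_file_reader_path(demo).read_text())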
| 34.075472 | 86 | 0.668882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.166113 |
0fe028e53da10f567941d0206c9d2ffff90acae0 | 418 | py | Python | prometheus_redis_client/helpers.py | ChielWH/prometheus_redis_client | 90852fb0eaf3aee1937a74cad6181c304dc6999a | [
"Apache-2.0"
] | 19 | 2019-04-02T11:46:45.000Z | 2022-03-21T03:38:56.000Z | prometheus_redis_client/helpers.py | ChielWH/prometheus_redis_client | 90852fb0eaf3aee1937a74cad6181c304dc6999a | [
"Apache-2.0"
] | 12 | 2019-08-17T05:55:23.000Z | 2022-02-10T07:37:38.000Z | prometheus_redis_client/helpers.py | ChielWH/prometheus_redis_client | 90852fb0eaf3aee1937a74cad6181c304dc6999a | [
"Apache-2.0"
] | 6 | 2019-11-20T15:12:38.000Z | 2022-03-21T03:36:44.000Z | import time
from typing import Callable
from functools import wraps
def timeit(metric_callback: Callable, **labels):
def wrapper(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
metric_callback(time.time() - start, labels=labels)
return result
return func_wrapper
return wrapper
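# Minimal self-contained demo: `print_metric` is a stand-in for a real metric
# callback (e.g. a histogram observe method) and the label set is hypothetical.
if __name__ == "__main__":
    def print_metric(value, labels):
        print(f"observed {value:.4f}s with labels {labels}")
    @timeit(print_metric, endpoint="/demo")
    def slow():
        time.sleep(0.01)
        return 42
    assert slow() == 42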
| 26.125 | 63 | 0.62201 | 0 | 0 | 0 | 0 | 220 | 0.526316 | 0 | 0 | 0 | 0 |
0fe195d068b46848f6a4f6f2244542d3be9fe7d3 | 930 | py | Python | tests/test_build.py | polishmatt/swp | e54a8f4ed13a48dd3a385c6d312edd6e4c86724a | [
"MIT"
] | 1 | 2017-02-13T23:09:46.000Z | 2017-02-13T23:09:46.000Z | tests/test_build.py | polishmatt/swp | e54a8f4ed13a48dd3a385c6d312edd6e4c86724a | [
"MIT"
] | 16 | 2016-11-03T02:50:17.000Z | 2017-01-24T04:35:42.000Z | tests/test_build.py | polishmatt/stawp | e54a8f4ed13a48dd3a385c6d312edd6e4c86724a | [
"MIT"
] | null | null | null |
import unittest
import filecmp
from stawp.build import Builder
class TestBuild(unittest.TestCase):
def build_fixture(self, name, description):
dest='/tmp/swp'
source='tests/fixtures/%s/src' % name
builder = Builder(dist=dest, base=source)
builder.interpret()
builder.render()
cmp = filecmp.dircmp(dest, 'tests/fixtures/%s/dest' % name)
for attr in ['left_only', 'right_only', 'common_funny', 'diff_files', 'funny_files']:
self.assertEqual(len(getattr(cmp, attr)), 0, 'Failed ' + description + "\n" + attr + " - " + str(getattr(cmp, attr)))
def test_build_min_copy(self):
self.build_fixture('minimum-copy', 'copy file only')
def test_build_min_page(self):
self.build_fixture('minimum-page', 'minimum configuration with one page')
def test_build_default(self):
self.build_fixture('builder', 'default build behavior')
| 34.444444 | 129 | 0.658065 | 863 | 0.927957 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.269892 |
0fe287ed47660782eaba77ed18083c4847e2daf2 | 1,721 | py | Python | ARP-SPOOF.py | DEMON1A/ARP-SP00FER | a5fac4fd1da1f095ea1d09bd0b1708edfd63391c | [
"MIT"
] | 2 | 2019-10-22T20:49:04.000Z | 2019-10-22T21:05:12.000Z | ARP-SPOOF.py | DEMON1A/ARP-SP00FER | a5fac4fd1da1f095ea1d09bd0b1708edfd63391c | [
"MIT"
] | null | null | null | ARP-SPOOF.py | DEMON1A/ARP-SP00FER | a5fac4fd1da1f095ea1d09bd0b1708edfd63391c | [
"MIT"
] | null | null | null | # Start Imports.
import scapy.all as scapy
import time, sys
def Banner():
# One Line Banner
Ban = "\t\t\t\t\t[+] << ARP-SP00F (MITM) >> [+]"
print(Ban)
Banner()
Address = input("\nTarget IP >> ")
MainIP = input("Network IP >> ")
Packets = 0
def MAC_SCAN(ip):
    arp = scapy.ARP(pdst=ip)
    broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
    ARP_Req_broadcast = broadcast/arp # The Ether frame must wrap the ARP layer
    answered = scapy.srp(ARP_Req_broadcast,timeout=1,verbose=False)[0]
    return answered[0][1].hwsrc
def SPOOF(target,network):
    target_MAC = MAC_SCAN(target) # Get The Victim MAC Address
    packet = scapy.ARP(op=2,pdst=target,hwdst=target_MAC,psrc=network) # Create The Packet To Send To The Victim/Router.
    scapy.send(packet,verbose=False) # Send The Packet Using Scapy
def restore(dist_ip,real_ip):
    dist_MAC = MAC_SCAN(dist_ip)
    real_MAC = MAC_SCAN(real_ip)
    packet = scapy.ARP(op=2,pdst=dist_ip,hwdst=dist_MAC,psrc=real_ip,hwsrc=real_MAC) # Rebind The Real IP To Its Real MAC
    scapy.send(packet,count=4,verbose=False)
try:
while True:
SPOOF(Address,MainIP) #--\ Send A Request To Address Once
SPOOF(MainIP,Address) #--/ Then Send Another Request To The Router.
Packets += 2 # Add 2 To The Total Number Of Packets After Sent The Requests
print("\r[+] Successful Send {0} Packets".format(str(Packets)), end="") # Print The Number Of Sent Packets
sys.stdout.flush() # Keep The Print AT Same Line
time.sleep(2.4) # Wait For 2.4 Seconds
except KeyboardInterrupt:
print("\nCancel.")
answer = input("Do you want to restore every thing? (Y)es or (N)o: ")
if answer.lower() == "y":
restore(Address,MainIP)
elif answer.lower() == "n":
print("Ok!")
sys.exit()
else: # Auto Exit If There Is Wrong Answer.
sys.exit()
| 33.745098 | 118 | 0.684486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 622 | 0.361418 |
0fe648b3645c63c3137ac9f66876140376501f31 | 3,513 | py | Python | ebrains_drive/repo.py | apdavison/ebrains-drive | 8ff11fc60e77c1b605485f550efc350c1a5f443a | [
"Apache-2.0"
] | 5 | 2020-12-20T16:12:18.000Z | 2022-03-29T09:56:34.000Z | ebrains_drive/repo.py | apdavison/ebrains-drive | 8ff11fc60e77c1b605485f550efc350c1a5f443a | [
"Apache-2.0"
] | 9 | 2020-09-25T08:27:41.000Z | 2022-01-03T08:41:48.000Z | ebrains_drive/repo.py | apdavison/ebrains-drive | 8ff11fc60e77c1b605485f550efc350c1a5f443a | [
"Apache-2.0"
] | 2 | 2020-09-18T17:32:38.000Z | 2020-10-13T13:34:38.000Z | from urllib.parse import urlencode
from ebrains_drive.files import SeafDir, SeafFile
from ebrains_drive.utils import raise_does_not_exist
class Repo(object):
"""
A seafile library
"""
def __init__(self, client, **kwargs):
self.client = client
allowed_keys = ['encrypted', 'group_name', 'groupid', 'head_commit_id', 'id', 'modifier_contact_email', 'modifier_email', 'modifier_name', 'mtime', 'mtime_relative', 'name', 'owner', 'owner_contact_email', 'owner_name', 'permission', 'root', 'share_from', 'share_from_contact_email', 'share_from_name', 'share_type', 'size', 'size_formatted', 'type', 'version', 'virtual']
# Update __dict__ but only for keys that have been predefined
# (silently ignore others)
self.__dict__.update((key, value) for key, value in kwargs.items() if key in allowed_keys)
# To NOT silently ignore rejected keys
# rejected_keys = set(kwargs.keys()) - set(allowed_keys)
# if rejected_keys:
# raise ValueError("Invalid arguments in constructor:{}".format(rejected_keys))
def __str__(self):
return "(id='{}', name='{}')".format(self.id, self.name)
def __repr__(self):
return "ebrains_drive.repo.Repo(id='{}', name='{}')".format(self.id, self.name)
@classmethod
def from_json(cls, client, repo_json):
return cls(client, **repo_json)
def is_readonly(self):
        # `permission` is the attribute populated from the repo JSON (see allowed_keys above)
        return 'w' not in self.permission
@raise_does_not_exist('The requested file does not exist')
def get_file(self, path):
"""Get the file object located in `path` in this repo.
Return a :class:`SeafFile` object
"""
assert path.startswith('/')
url = '/api2/repos/%s/file/detail/' % self.id
query = '?' + urlencode(dict(p=path))
file_json = self.client.get(url + query).json()
return SeafFile(self, path, file_json['id'], "file", file_json['size'])
@raise_does_not_exist('The requested dir does not exist')
def get_dir(self, path):
"""Get the dir object located in `path` in this repo.
Return a :class:`SeafDir` object
"""
assert path.startswith('/')
url = '/api2/repos/%s/dir/' % self.id
query = '?' + urlencode(dict(p=path))
resp = self.client.get(url + query)
dir_id = resp.headers['oid']
dir_json = resp.json()
dir = SeafDir(self, path, dir_id, "dir")
dir.load_entries(dir_json)
return dir
def delete(self):
"""Remove this repo. Only the repo owner can do this"""
self.client.delete('/api2/repos/' + self.id)
def list_history(self):
"""List the history of this repo
Returns a list of :class:`RepoRevision` object.
"""
pass
## Operations only the repo owner can do:
def update(self, name=None):
"""Update the name of this repo. Only the repo owner can do
this.
"""
pass
def get_settings(self):
"""Get the settings of this repo. Returns a dict containing the following
keys:
`history_limit`: How many days of repo history to keep.
"""
pass
def restore(self, commit_id):
pass
class RepoRevision(object):
def __init__(self, client, repo, commit_id):
self.client = client
self.repo = repo
self.commit_id = commit_id
def restore(self):
"""Restore the repo to this revision"""
        self.repo.restore(self.commit_id)  # Repo defines restore(), not revert()
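# --- Usage sketch (hypothetical: assumes an authenticated ebrains_drive client
# and a repo JSON payload obtained from its repo-listing endpoint) ---
# repo = Repo.from_json(client, repo_json)
# root = repo.get_dir("/")              # SeafDir with entries loaded
# readme = repo.get_file("/README.md")  # SeafFile; raises if the path is missing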
| 34.441176 | 380 | 0.618275 | 3,371 | 0.959579 | 0 | 0 | 1,119 | 0.318531 | 0 | 0 | 1,566 | 0.445773 |
0fe7f5a0d2d6cc56f2638823285b05af4eeb1e16 | 765 | py | Python | dice_vtk/geometries/regularPolygon.py | dicehub/dice_vtk | ab8d9f34ae359461db5687d05bf38548bbaca6ea | [
"MIT"
] | null | null | null | dice_vtk/geometries/regularPolygon.py | dicehub/dice_vtk | ab8d9f34ae359461db5687d05bf38548bbaca6ea | [
"MIT"
] | null | null | null | dice_vtk/geometries/regularPolygon.py | dicehub/dice_vtk | ab8d9f34ae359461db5687d05bf38548bbaca6ea | [
"MIT"
] | null | null | null | # External modules
# ================
from vtk import vtkRegularPolygonSource
# DICE modules
# ============
from .simple_geometry import SimpleGeometry
from .geometry_base import GeometryProperty
class RegularPolygon(SimpleGeometry):
def __init__(self, name='RegularPolygon', **kwargs):
super().__init__(name=name,
source=vtkRegularPolygonSource, **kwargs)
@GeometryProperty
def radius(self):
return self.source.GetRadius()
@radius.setter
def radius(self, value):
self.source.SetRadius(value)
@GeometryProperty
def number_of_sides(self):
return self.source.GetNumberOfSides()
@number_of_sides.setter
def number_of_sides(self, value):
self.source.SetNumberOfSides(value)
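# Usage sketch (assumes a dice_vtk scene to attach the geometry to):
# poly = RegularPolygon()
# poly.radius = 2.0          # forwarded to vtkRegularPolygonSource.SetRadius
# poly.number_of_sides = 6   # forwarded to SetNumberOfSides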
| 24.677419 | 56 | 0.686275 | 565 | 0.738562 | 0 | 0 | 357 | 0.466667 | 0 | 0 | 80 | 0.104575 |
0fe8f61e2798227c0a5f37e919ed4b4d1165a3cb | 3,411 | py | Python | setup.py | erisyon/plaster | 20af32aed2365c6351fe3c26293308960099152b | [
"MIT"
] | null | null | null | setup.py | erisyon/plaster | 20af32aed2365c6351fe3c26293308960099152b | [
"MIT"
] | 22 | 2020-06-22T19:27:50.000Z | 2021-09-30T20:02:31.000Z | setup.py | erisyon/plaster | 20af32aed2365c6351fe3c26293308960099152b | [
"MIT"
] | 2 | 2020-06-16T17:38:46.000Z | 2021-08-06T09:37:22.000Z | # python setup.py build_ext --inplace
import importlib
import os
import pathlib
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext as build_ext_orig
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
exec(open("plaster/version.py").read())
# Custom extension logic copied from https://stackoverflow.com/a/48015772
class PlasterCExtension(Extension):
def __init__(self, module):
self.module = module
# don't invoke the original build_ext for this special extension
super().__init__("plaster", sources=[])
class build_ext(build_ext_orig):
def run(self):
for ext in self.extensions:
self.build_plaster_c(ext)
super().run()
def build_plaster_c(self, ext):
if isinstance(ext, PlasterCExtension):
cwd = pathlib.Path().absolute()
ext_file = pathlib.Path(self.get_ext_fullpath(ext.name))
# This is necessary to trick build_ext into doing the right thing
ext_file.mkdir(parents=True, exist_ok=True)
extdir = ext_file.parent
extdir.mkdir(parents=True, exist_ok=True)
# The directory where all output files need to go appears to be:
output_dir = extdir.absolute()
# The source directory appears to be the cwd in this context
source_dir = cwd
build_mod = importlib.import_module(f"{ext.module}.build")
build_mod.build(source_dir=source_dir, output_dir=output_dir, force=True)
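            # Assumed contract (inferred from the call above, not documented
            # here): each extension package ships a build.py exposing
            # build(source_dir, output_dir, force) that writes its compiled
            # artifacts into output_dir.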
setup(
name="erisyonplaster",
version=__version__,
description="Erisyon's Fluoro-Sequencing Platform",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/erisyon/plaster",
author="Erisyon",
author_email="plaster+pypi@erisyon.com",
license="MIT",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=["plaster"],
include_package_data=True,
install_requires=[
"arrow",
"bokeh",
"ipython",
"jupyter",
"munch",
"nbstripout",
"nptyping",
"numpy",
"opencv-python",
"pandas",
"plumbum",
# see the comment in plaster/plaster/run/sigproc_v2/synth.py for why this is commented out
# "psf",
"psutil",
"pudb",
"pyyaml",
"requests",
"retrying",
"scikit-image",
"scikit-learn",
"twine",
"wheel",
"zbs.zest",
],
python_requires=">=3.6",
ext_modules=[
PlasterCExtension("plaster.run.nn_v2.c"),
PlasterCExtension("plaster.run.sigproc_v2.c_gauss2_fitter"),
PlasterCExtension("plaster.run.sigproc_v2.c_radiometry"),
PlasterCExtension("plaster.run.sigproc_v2.c_peak_find"),
PlasterCExtension("plaster.run.survey_v2.c"),
PlasterCExtension("plaster.run.sim_v2.c_dytsim"),
PlasterCExtension("plaster.run.sim_v2.c_radsim"),
PlasterCExtension("plaster.run.prep.c"),
],
cmdclass={"build_ext": build_ext,},
)
| 31.293578 | 98 | 0.635884 | 1,169 | 0.342715 | 0 | 0 | 0 | 0 | 0 | 0 | 1,397 | 0.409557 |
0fea9e84390ca4337814033e2de20494658f11f0 | 6,823 | py | Python | src/commands/ctx.py | jportner/kibbe | 72d62ba0c1ca206430fc606c72d19eace6329e41 | [
"MIT"
] | 2 | 2021-08-17T15:04:38.000Z | 2021-12-28T15:41:42.000Z | src/commands/ctx.py | jportner/kibbe | 72d62ba0c1ca206430fc606c72d19eace6329e41 | [
"MIT"
] | 7 | 2021-08-24T07:53:19.000Z | 2022-01-26T20:45:24.000Z | src/commands/ctx.py | jportner/kibbe | 72d62ba0c1ca206430fc606c72d19eace6329e41 | [
"MIT"
] | 2 | 2021-07-15T16:19:44.000Z | 2022-01-19T20:25:30.000Z | import os
from pathlib import Path
from src.git import find_existing_worktree, get_worktree_list_flat
from src.tmux import get_current_panel, is_inside_tmux
import subprocess
import click
from termcolor import colored
from src.util import get_valid_filename
@click.command()
@click.argument(
"name", type=click.STRING, autocompletion=get_worktree_list_flat, required=False
)
@click.option(
"--branch",
help="Branch name to use for the new worktree",
default="",
)
@click.option(
"--parent-path",
help="Custom parent path to use to set the new worktree. Defaults to ../",
default="",
)
@click.option(
"--source",
help="Branch to create the worktree from. Defaults to current branch. e.g. master",
default="",
)
@click.option(
"-i",
"--interactive",
help="Ask questions about the new context",
is_flag=True,
default=True,
)
@click.option(
"-B",
"--overwrite-branch",
help="If the branch already exists, reset it to source",
is_flag=True,
default=False,
)
@click.option(
"--cd",
"--change-dir",
help="Change to new context directory (Tmux only)",
is_flag=True,
default=True,
)
@click.option(
"--delete",
help="Removes the worktree if exists",
is_flag=True,
default=False,
confirmation_prompt=True,
)
@click.option(
"-l",
"--list-worktrees",
help="List existing worktree. Alias for `git worktree list`",
is_flag=True,
default=False,
)
def ctx(
name,
parent_path,
source,
branch,
interactive,
overwrite_branch,
cd,
list_worktrees,
delete,
):
"""
    ctx is a kibbe subcommand that wraps git worktree with some quality-of-life improvements.
    NAME accepts the name of the "context" you want to switch to. It is a shorthand so you
    don't have to remember paths, as git worktree requires.
    It allows you to quickly switch to and create git worktrees without having to type or
    memorize all the git worktree parameters.
    ctx works better when you use it with tmux. On macOS, if you use iTerm2,
    you can start tmux with `tmux -CC`. Install it first with `brew install tmux`.
    ctx is not intended to be a replacement for git worktree; if you can't perform the
    operation you want with ctx, please see the git worktree manual entry: https://git-scm.com/docs/git-worktree
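    Example (hypothetical names):
        kibbe ctx my-feature --source master --branch feat/my-feature
    creates ../my-feature as a new worktree and, when run inside tmux, cd's into it.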
"""
if list_worktrees:
subprocess.run(["git", "worktree", "list"])
exit(0)
if not name:
raise click.ClickException(
colored(
"You must pass the NAME of the worktree you want to change to", "red"
)
)
if name.startswith("../"):
name = name[3:]
path_name = get_valid_filename(name)
existing_worktree = find_existing_worktree(path_name)
if delete and not existing_worktree:
raise click.ClickException(
"Can not remove worktree. Worktree doesn't exist: "
+ colored(path_name, "red")
)
elif delete:
        if click.confirm(
            "Are you sure you want to delete the worktree "
            + colored(existing_worktree["worktree"], "yellow")
        ):
click.echo("Deleting worktree...")
subprocess.run(["git", "worktree", "remove", existing_worktree["worktree"]])
exit(0)
if existing_worktree:
return handle_existing_worktree(existing_worktree)
if not branch:
if interactive:
            branch = click.prompt("Git branch name for the new worktree", default=name)
else:
branch = name
if not parent_path:
possible_path = os.path.join(Path(os.getcwd()).parent.absolute())
if interactive:
parent_path = click.prompt(
"Parent path target for the git worktree",
default=possible_path,
type=click.Path(dir_okay=True, file_okay=False),
)
else:
parent_path = possible_path
full_path = os.path.join(parent_path, path_name)
if not source:
possible_source = subprocess.getoutput("git rev-parse --abbrev-ref HEAD")
if interactive:
source = click.prompt(
"Source branch for the git worktree. e.g. master",
default=possible_source,
)
else:
source = possible_source
click.echo("Will create a new git worktree called: " + colored(path_name, "yellow"))
click.echo("In this location: " + colored(parent_path, "blue"))
click.echo("With a new branch name: " + colored(branch, "blue"))
click.echo("From this branch: " + colored(source, "blue"))
    click.echo("--- git output ---")
b_option = "-b" if not overwrite_branch else "-B"
command = ["git", "worktree", "add", full_path, source, b_option, branch]
process = subprocess.run(command)
    click.echo("--- end git output ---")
if process.returncode != 0:
raise click.ClickException(
colored(
"Something went wrong with git. See git output and verify your"
" parameters",
"red",
)
)
click.echo(
colored("Success!", "green")
+ " a new git worktree was created in "
+ colored(full_path, "blue")
)
click.echo("To change to your new worktree run:")
click.echo(colored("cd %s" % full_path, "yellow"))
# this must always be the last command
if cd and is_inside_tmux():
click.echo("Tmux session detected. Changing to worktree")
current_pane = get_current_panel()
current_pane.send_keys("cd %s && nvm use" % full_path)
exit(0)
elif not is_inside_tmux():
click.echo(
"Changing to a worktree is only supported if you are running inside tmux"
)
def handle_existing_worktree(existing_worktree):
existing_path_name = existing_worktree["worktree"]
click.echo(
"Existing worktree with the same name found at "
+ colored(existing_path_name, "yellow")
)
click.echo("Worktree branch: " + colored(existing_worktree["branch"], "blue"))
click.echo("Head commit: " + colored(existing_worktree["HEAD"], "blue"))
click.echo()
if Path(existing_path_name) == Path(os.getcwd()):
click.echo(colored("You are already on this worktree", "yellow"))
exit(0)
if not is_inside_tmux():
click.echo("You can switch to it by running: ")
click.echo(colored("cd %s" % existing_path_name, "blue"))
click.echo()
click.echo("Run this command inside tmux to automatically cd to it")
else:
click.echo("Tmux session detected. Changing to worktree")
current_pane = get_current_panel()
current_pane.send_keys("cd %s && nvm use" % existing_path_name)
exit(0)
| 30.596413 | 102 | 0.628463 | 0 | 0 | 0 | 0 | 5,548 | 0.813132 | 0 | 0 | 2,625 | 0.384728 |
0feb0c6e6241aa5986054baa7aa3874ebe9efe97 | 4,313 | py | Python | code/biases_generation/biases_generation.py | commonsense-exception/commonsense-exception | ab83323a2d566f49b6de7b4b06c3c338ceec895f | [
"MIT"
] | null | null | null | code/biases_generation/biases_generation.py | commonsense-exception/commonsense-exception | ab83323a2d566f49b6de7b4b06c3c338ceec895f | [
"MIT"
] | null | null | null | code/biases_generation/biases_generation.py | commonsense-exception/commonsense-exception | ab83323a2d566f49b6de7b4b06c3c338ceec895f | [
"MIT"
] | null | null | null | import pandas as pd
import sys, argparse
import importlib.util
from tqdm import tqdm
NUM_SELECTING = 10
def parse_args():
parser = argparse.ArgumentParser(description='arguments for generating biases')
    parser.add_argument('-top', help="k: number of top predictions whose intersection is taken", default=10)
parser.add_argument('-d', help="source data path (THINGS dataset)",\
default="../../data/source/things_concepts.tsv")
parser.add_argument('-o', help="output path - path to the top-k associative biases",\
default="../../data/assets/k_analysis/")
# Brown University argument
parser.add_argument('-dept', help="whether we're on the department machine or not", default="True")
return parser.parse_args()
# helper function to help load things from maskedlm folder
def module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
MASKEDLM_MODULE_PATH = "../maskedlm/maskedlm.py"
# importing the function one_mask_generation from maskedlm.py
one_mask_generation = module_from_file('one_mask_generation', MASKEDLM_MODULE_PATH).one_mask_generation
# importing the list of supported models from maskedlm.py
MODEL_NAMES = module_from_file('one_mask_generation', MASKEDLM_MODULE_PATH).MODEL_NAMES
# import the function to load the tokenizer
load_model_and_tokenizer = module_from_file('load_model_and_tokenizer', MASKEDLM_MODULE_PATH).load_model_and_tokenizer
def find_common_given_model(tokenizer, model, model_name, df, loadFromFile=False, top=10):
"""
    input:
    - tokenizer: HuggingFace tokenizer
    - model: HuggingFace MaskedLM model
    - model_name: str, name of the model
    - df: pandas dataframe that keeps our information
    - loadFromFile: if True, skip recomputation and return a copy of df unchanged
    output:
    - a copy of df with a "<model_name>_common" column of comma-joined words
      predicted for both the affirmative and the negated prompt
"""
new_df = df.copy()
if loadFromFile:
return new_df
def process_data_row(row, num_selecting=top):
word = row["Word"]
############################## ENDING WITH A "." ###################################
# use bert generation to select the top 10 words
affirmative = "The (OBJECT) is".replace("(OBJECT)", word)
negative = "The (OBJECT) is not".replace("(OBJECT)", word)
# top k affirmative and negative
top_ten_aff_dot = one_mask_generation(tokenizer, model, affirmative, num_selecting, ending=".")
top_ten_neg_dot = one_mask_generation(tokenizer, model, negative, num_selecting, ending=".")
# find the intersection
common_dot = set(top_ten_aff_dot).intersection(set(top_ten_neg_dot))
############################## ENDING WITH A "," ###################################
        # top k affirmative and negative
top_ten_aff_comma = one_mask_generation(tokenizer, model, affirmative, num_selecting, ending=",")
top_ten_neg_comma = one_mask_generation(tokenizer, model, negative, num_selecting, ending=",")
# find the intersection
common_comma = set(top_ten_aff_comma).intersection(set(top_ten_neg_comma))
############################# UNION THEM TOGETHER ##################################
common = common_dot.union(common_comma)
return ",".join([e for e in common])
if model_name + "_common" not in new_df.columns:
new_df[model_name + "_common"] = new_df.apply(lambda x: process_data_row(x), axis=1)
return new_df
if __name__ == "__main__":
args = parse_args()
DATA_PATH = args.d
NUM_SELECTING = args.top
OUT_PATH = args.o + f"things_k{NUM_SELECTING}.tsv"
try:
        # If this succeeds, a previous output file exists; resume from it
DATA = pd.read_csv(OUT_PATH, sep="\t")
except Exception as e:
# if the path is not valid, we'll load from source
DATA = pd.read_csv(DATA_PATH, sep="\t")
DATA = DATA[["Word"]]
    ## Loop across models
for m_name in tqdm(MODEL_NAMES):
tknzr, mdl = load_model_and_tokenizer(m_name)
DATA = find_common_given_model(tknzr, mdl, m_name, DATA, top=NUM_SELECTING)
DATA.to_csv(OUT_PATH, index=False, sep="\t")
| 43.565657 | 124 | 0.664735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,648 | 0.382101 |
0feb6e540faea9556a7188bf2180fcd15abce9ef | 1,279 | py | Python | qf_lib/documents_utils/document_exporting/element/custom.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 198 | 2019-08-16T15:09:23.000Z | 2022-03-30T12:44:00.000Z | qf_lib/documents_utils/document_exporting/element/custom.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 13 | 2021-01-07T10:15:19.000Z | 2022-03-29T13:01:47.000Z | qf_lib/documents_utils/document_exporting/element/custom.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 29 | 2019-08-16T15:21:28.000Z | 2022-02-23T09:53:49.000Z | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.common.enums.grid_proportion import GridProportion
from qf_lib.documents_utils.document_exporting.document import Document
from qf_lib.documents_utils.document_exporting.element import Element
class CustomElement(Element):
def __init__(self, html: str, grid_proportion=GridProportion.Eight):
"""
An element containing custom HTML.
"""
super().__init__(grid_proportion)
self.html = html
def generate_html(self, document: Document) -> str:
"""
Generates the HTML that represents the underlying element.
"""
return self.html
| 38.757576 | 78 | 0.715403 | 409 | 0.319282 | 0 | 0 | 0 | 0 | 0 | 0 | 790 | 0.616706 |
0feb7089dc2957a91fab8aa02b4296dd72cb381a | 214 | py | Python | api/urls.py | GHImplementationTeam/referrals | fe00e97e208f0d6e451653cd1586f51b4a3e9720 | [
"MIT"
] | null | null | null | api/urls.py | GHImplementationTeam/referrals | fe00e97e208f0d6e451653cd1586f51b4a3e9720 | [
"MIT"
] | null | null | null | api/urls.py | GHImplementationTeam/referrals | fe00e97e208f0d6e451653cd1586f51b4a3e9720 | [
"MIT"
] | null | null | null | from django.conf.urls import url
import referrals
urlpatterns = [
url(r'^referrals/$', referrals.ReferralsView.as_view()),
url(r'^referral/(?P<referral_id>[-&\w]+)/$', referrals.ReferralView.as_view()),
]
| 26.75 | 83 | 0.691589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.252336 |
0febcd6f09e5ceb6936fd4c607ad690f91da7471 | 194 | py | Python | venv/webscrap_teste.py | pgfjunior/teste_web_scraping | fb8de63863287c9714a49849c9e2866def65f068 | [
"MIT"
] | null | null | null | venv/webscrap_teste.py | pgfjunior/teste_web_scraping | fb8de63863287c9714a49849c9e2866def65f068 | [
"MIT"
] | null | null | null | venv/webscrap_teste.py | pgfjunior/teste_web_scraping | fb8de63863287c9714a49849c9e2866def65f068 | [
"MIT"
] | null | null | null | import urllib.request
from bs4 import BeautifulSoup
page = urllib.request.urlopen('https://www.dentalcremer.com.br/')
soup = BeautifulSoup(page, "html.parser")
print(soup.find_all('table'))
| 19.4 | 65 | 0.757732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.278351 |
0fed33d3830cf5a78951341d11b17322020b73f1 | 18,691 | py | Python | datasets/ner_dataset.py | saiajaym/MetaLearningForNER | 88009aa478645685e5bacef762e896c9ca1ecad9 | [
"MIT"
] | 3 | 2021-05-12T15:16:02.000Z | 2021-11-02T05:23:56.000Z | datasets/ner_dataset.py | saiajaym/MetaLearningForNER | 88009aa478645685e5bacef762e896c9ca1ecad9 | [
"MIT"
] | 2 | 2021-08-07T01:59:57.000Z | 2022-03-23T09:46:30.000Z | datasets/ner_dataset.py | saiajaym/MetaLearningForNER | 88009aa478645685e5bacef762e896c9ca1ecad9 | [
"MIT"
] | 2 | 2020-12-27T22:31:14.000Z | 2021-04-02T17:36:35.000Z | import itertools
import json
import os
import random
from collections import defaultdict, Counter
from tqdm.auto import tqdm, trange
from torch.nn import CrossEntropyLoss
from torch.utils import data
import numpy as np
from datasets import utils
class NERSampler:
def __init__(self, dataset, labels, label_map, n_cls, n_shot, n_query=5, n_batch=100):
print (f'Number of examples in NER dataset is {len(dataset)}')
self.labels = labels
self.classes = set()
for lab in labels:
if len(lab) > 2:
self.classes.add(lab[2:])
self.label_map = label_map
self.n_cls = n_cls
self.n_shot = n_shot
self.n_query = n_query
self.n_batch = n_batch
self.dataset = dataset
print ("{}-way {}-shot with {}-query and {} batchsize".format(self.n_cls, self.n_shot, self.n_query, self.n_batch))
self.sent_class_map, self.class_sent_map = self._get_sent_class_maps(dataset)
# stats on data
print ('## STATISTICS ##')
for cls in self.class_sent_map:
print (cls, len(self.class_sent_map[cls]))
self.data = self.make_episodes()
def make_episodes(self):
"""
Sample mini-batches for episode training
"""
tags_epi, sup_epi, query_epi = [], [], []
for _ in trange(self.n_batch):
classes = self._sample_classes()
# print ("sampled classes", classes)
tags = defaultdict(lambda:-1)
tags['O'] = 0
for cls in classes:
if cls not in tags:
tags[cls] = len(tags)
sup_sents, query_sents = self.sample_sentences(classes, tags)
# print ('sampled support labels', sup_sents.labels)
# print ('sampled query labels', query_sents.labels)
tags_epi.append(tags)
sup_epi.append(sup_sents)
query_epi.append(query_sents)
return tags_epi, sup_epi, query_epi
def __getitem__(self, index):
return self.data[0][index], self.data[1][index], self.data[2][index]
def __len__(self):
return self.n_batch
@staticmethod
def _get_sent_class_maps(dataset):
# map from a sentence Id to a list of pairs with
# B-Xs and the freqs of B-X in the sentence
sent_class_map = defaultdict(list)
# map from B-X to a list of pairs with
# sentence ids and the freqs of B-X in the sentence
class_sent_map = defaultdict(list)
for i, sent in enumerate(dataset):
_, tags = sent.words, sent.labels
class_freqs = Counter()
for tag in tags:
if tag.startswith('B-'):
# we only store the `X` part of `B-X`
class_freqs[tag[2:]] += 1
for cls, freq in class_freqs.items():
sent_class_map[i].append((cls, freq))
class_sent_map[cls].append((i, freq))
return sent_class_map, class_sent_map
def tagged_labels(self, labels, tags):
t_labels = []
for lab in labels:
if len(lab) > 2: lab = lab[2:]
if lab not in tags:
t_labels.append(-1)
else:
t_labels.append(tags[lab])
return t_labels
def sample_sentences(self, classes, tags):
"""
        Sample support and query sentences. A greedy algorithm is implemented
        that always samples the less frequent classes first (self.n_shot support
        and self.n_query query sentences per class).
        :param classes: the entity classes of interest
        :param tags: mapping from class name to episode-local label index
        :return: two MetaNERDataset objects holding the support and query
        sentences respectively
"""
sup_sents, query_sents = [], []
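        # Greedy order note: _sample_classes returns the rarest classes first,
        # so scarce entity types fill their quotas before frequent ones do.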
# sample support set
sampled_cls_counters = {cls: 0 for cls in classes}
for cls in classes:
# not enough sentences for the class, so sample with replacement
replacement = (len(self.class_sent_map[cls]) < self.n_shot)
while sampled_cls_counters[cls] < self.n_shot:
sent, _ = random.choice(self.class_sent_map[cls])
if not replacement and sent in sup_sents:
continue
for inn_cls, freq in self.sent_class_map[sent]:
if inn_cls in sampled_cls_counters:
sampled_cls_counters[inn_cls] += freq
sup_sents.append(sent)
# sample query set
sampled_cls_counters = {cls: 0 for cls in classes}
for cls in classes:
# not enough sentences for the class, so sample with replacement
replacement = (len(self.class_sent_map[cls]) < self.n_shot + self.n_query)
while sampled_cls_counters[cls] < self.n_query:
sent, _ = random.choice(self.class_sent_map[cls])
if not replacement and (sent in sup_sents
or sent in query_sents):
continue
for inn_cls, freq in self.sent_class_map[sent]:
if inn_cls in sampled_cls_counters:
sampled_cls_counters[inn_cls] += freq
query_sents.append(sent)
return MetaNERDataset(
[self.dataset[d].words for d in sup_sents],
[self.tagged_labels(self.dataset[d].labels, tags) for d in sup_sents],
self.n_cls + 1
), MetaNERDataset(
[self.dataset[d].words for d in query_sents],
[self.tagged_labels(self.dataset[d].labels, tags) for d in query_sents],
self.n_cls + 1
)
def _sample_classes(self):
"""
        Subsample entity classes, sorted by frequency.
        :return: a list of self.n_cls class names, rarest first
"""
sorted_list = []
for cls, val in self.class_sent_map.items():
if cls not in self.classes:
continue
sorted_list.append((cls, len(val)))
assert len(sorted_list) >= self.n_cls
random.shuffle(sorted_list)
sorted_list = sorted_list[:self.n_cls]
sorted_list = sorted(sorted_list, key=lambda p: p[1])
return [cls for cls, _ in sorted_list]
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
def read_examples_from_file(data_dir, valid_labels):
print (f'valid labels: {valid_labels}')
file_path = data_dir
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line.strip() == "":
if words:
for i, label in enumerate(labels):
if label not in valid_labels:
labels[i] = 'O'
examples.append(InputExample(guid="{}".format(guid_index), words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split()
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
for i, label in enumerate(labels):
if label not in valid_labels:
labels[i] = 'O'
examples.append(InputExample(guid="{}".format(guid_index), words=words, labels=labels))
label_map = defaultdict(int)
for i, label in enumerate(valid_labels): # assumption that valid_labels[0] == 'O'
if label == 'O':
label_map[label] = i
else:
if label[2:] not in label_map:
label_map[label[2:]] = len(label_map)
return examples, label_map
def get_labels(path):
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
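# Note: the fallback list above is the standard CoNLL-2003 BIO tag set
# (PER / ORG / LOC / MISC entities).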
class MetaNERDataset(data.Dataset):
def __init__(self, sentences, labels, n_classes):
self.sentences = sentences
self.labels = labels
self.n_classes = n_classes
def __len__(self):
return len(self.sentences)
def __getitem__(self, index):
return self.sentences[index], self.labels[index]
# class MetaNERDataset(data.Dataset):
# def __init__(self, file_name):
# json_dict = utils.read_json(file_name)
# self.sentences, self.labels = [], []
# for entry in json_dict:
# self.sentences.append(entry['sentence'])
# self.labels.append(entry['label'])
# self.n_classes = np.max(list(itertools.chain(*self.labels))) + 1
# def __len__(self):
# return len(self.sentences)
# def __getitem__(self, index):
# return self.sentences[index], self.labels[index]
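# --- Minimal smoke test (hypothetical toy data; illustrative only) ---
# One episode from NERSampler: `tags` maps the sampled classes to episode-local
# ids ('O' -> 0, unsampled classes -> -1), while support/query are the
# MetaNERDataset batches built by the greedy sampler above.
def _demo_episode():
    toy = [
        InputExample("1", ["Alice", "smiled"], ["B-PER", "O"]),
        InputExample("2", ["Bob", "slept"], ["B-PER", "O"]),
        InputExample("3", ["visit", "Paris"], ["O", "B-LOC"]),
        InputExample("4", ["leave", "Berlin"], ["O", "B-LOC"]),
        InputExample("5", ["ACME", "hires"], ["B-ORG", "O"]),
        InputExample("6", ["IBM", "ships"], ["B-ORG", "O"]),
    ]
    labels = ["O", "B-PER", "B-LOC", "B-ORG"]
    sampler = NERSampler(toy, labels, label_map=None, n_cls=2, n_shot=1, n_query=1, n_batch=2)
    tags, support, query = sampler[0]
    return dict(tags), len(support), len(query)  # e.g. ({'O': 0, 'PER': 1, 'LOC': 2}, 2, 2)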
class SequentialSampler:
def __init__(self, dataset, labels, label_map, n_cls, n_shot, n_query=5, n_batch=100):
print (f'Number of examples in NER dataset is {len(dataset)}')
self.labels = labels
self.classes = set()
for lab in labels:
if len(lab) > 2:
self.classes.add(lab[2:])
self.label_map = label_map
self.n_cls = n_cls
self.n_shot = n_shot
self.n_query = n_query
self.n_batch = n_batch
self.dataset = dataset
print ("{}-way {}-shot with {}-query and {} batchsize".format(self.n_cls, self.n_shot, self.n_query, self.n_batch))
self.sent_class_map, self.class_sent_map = self._get_sent_class_maps(dataset)
# stats on data
print ('## STATISTICS ##')
for cls in self.class_sent_map:
print (cls, len(self.class_sent_map[cls]))
self.data = self.make_episodes()
def make_episodes(self):
"""
        Build episodes: for each batch, sample classes and one support set,
        then sweep the dataset in sequential query slices.
"""
tags_epi, sup_epi, query_epi = [], [], []
for _ in trange(self.n_batch):
classes = self._sample_classes()
# print ("sampled classes", classes)
tags = defaultdict(lambda:-1)
tags['O'] = 0
for cls in classes:
if cls not in tags:
tags[cls] = len(tags)
sup_sents = self.sample_support_sentences(classes,tags)
for i in range(int(len(self.dataset)/(self.n_cls*self.n_shot))):
query_sents = self.sample_query_sentences(classes, tags, i)
tags_epi.append(tags)
sup_epi.append(sup_sents)
query_epi.append(query_sents)
return tags_epi, sup_epi, query_epi
def __getitem__(self, index):
return self.data[0][index], self.data[1][index], self.data[2][index]
def __len__(self):
return self.n_batch
@staticmethod
def _get_sent_class_maps(dataset):
# map from a sentence Id to a list of pairs with
# B-Xs and the freqs of B-X in the sentence
sent_class_map = defaultdict(list)
# map from B-X to a list of pairs with
# sentence ids and the freqs of B-X in the sentence
class_sent_map = defaultdict(list)
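        # e.g. if dataset[3] has labels "B-PER I-PER O B-PER", then
        # sent_class_map[3] == [('PER', 2)] and class_sent_map['PER']
        # will contain the pair (3, 2)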
for i, sent in enumerate(dataset):
_, tags = sent.words, sent.labels
class_freqs = Counter()
for tag in tags:
if tag.startswith('B-'):
# we only store the `X` part of `B-X`
class_freqs[tag[2:]] += 1
for cls, freq in class_freqs.items():
sent_class_map[i].append((cls, freq))
class_sent_map[cls].append((i, freq))
return sent_class_map, class_sent_map
def tagged_labels(self, labels, tags):
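        # e.g. tagged_labels(['B-PER', 'I-PER', 'O'], {'O': 0, 'PER': 1})
        # returns [1, 1, 0]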
return [
tags[lab[2:]] if len(lab) > 2 else tags[lab]
for lab in labels
]
def sample_support_sentences(self, classes, tags):
"""
Sample support and query sentences. A greedy algorithm is implemented
that always sample less freqent classes first.
:param classes: the entity classes of interests
:param n_shot: the number of support points
:param n_query: the number of query points
:return: two lists of sentence Ids for support and query sets
respectively
"""
sup_sents = []
# sample support set
sampled_cls_counters = {cls: 0 for cls in classes}
for cls in classes:
# not enough sentences for the class, so sample with replacement
replacement = (len(self.class_sent_map[cls]) < self.n_shot)
while sampled_cls_counters[cls] < self.n_shot:
sent, _ = random.choice(self.class_sent_map[cls])
if not replacement and sent in sup_sents:
continue
for inn_cls, freq in self.sent_class_map[sent]:
if inn_cls in sampled_cls_counters:
sampled_cls_counters[inn_cls] += freq
sup_sents.append(sent)
return MetaNERDataset(
[self.dataset[d].words for d in sup_sents],
[self.tagged_labels(self.dataset[d].labels, tags) for d in sup_sents],
self.n_cls+1
)
def sample_query_sentences(self, classes, tags, i):
"""
Sample support and query sentences. A greedy algorithm is implemented
that always sample less freqent classes first.
:param classes: the entity classes of interests
:param n_shot: the number of support points
:param n_query: the number of query points
:return: two lists of sentence Ids for support and query sets
respectively
"""
        query_sents = list(range(i * self.n_cls * self.n_shot, (i + 1) * self.n_cls * self.n_shot))
return MetaNERDataset(
[self.dataset[d].words for d in query_sents],
[self.tagged_labels(self.dataset[d].labels, tags) for d in query_sents],
self.n_cls+1
)
def _sample_classes(self):
"""
Subsample entity classes, sorted by frequencies
:param targets: target classes to sample from
:param n_cls: num of entity classes to sample
:return: a list of classes
"""
sorted_list = []
for cls, val in self.class_sent_map.items():
if cls not in self.classes:
continue
sorted_list.append((cls, len(val)))
assert len(sorted_list) >= self.n_cls
random.shuffle(sorted_list)
sorted_list = sorted_list[:self.n_cls]
sorted_list = sorted(sorted_list, key=lambda p: p[1])
return [cls for cls, _ in sorted_list]
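# Illustrative episodic setup (sizes are hypothetical; `examples` and
# `label_map` would come from the CoNLL reader above):
#   sampler = SequentialSampler(examples, get_labels(None), label_map,
#                               n_cls=4, n_shot=5)
#   tags, support, query = sampler[0]  # one episode: tag map + two datasets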
class SupervisedSampler:
def __init__(self, dataset, labels, batch_size=30):
print (f'Number of examples in NER dataset is {len(dataset)}')
self.labels = labels
self.classes = []
for lab in labels:
if len(lab) > 2:
self.classes.append(lab[2:])
self.batch_size = batch_size
self.n_batch = len(dataset) // self.batch_size
self.dataset = dataset
self.sent_class_map, self.class_sent_map = self._get_sent_class_maps(dataset)
# stats on data
print ('## STATISTICS ##')
for cls in self.class_sent_map:
print (cls, len(self.class_sent_map[cls]))
self.data = self.make_batches()
def make_batches(self):
"""
Sample mini-batches for episode training
"""
batches = []
tags = defaultdict(lambda:-1)
tags['O'] = 0
for cls in self.classes:
if cls not in tags:
tags[cls] = len(tags)
self.tags = tags
random.shuffle(self.dataset)
for i in trange(self.n_batch):
batch = self.sample_batch_sentences(i*self.batch_size, self.batch_size)
batches.append(batch)
return batches
def __getitem__(self, index):
return self.tags, self.data[index]
def __len__(self):
return self.n_batch
@staticmethod
def _get_sent_class_maps(dataset):
# map from a sentence Id to a list of pairs with
# B-Xs and the freqs of B-X in the sentence
sent_class_map = defaultdict(list)
# map from B-X to a list of pairs with
# sentence ids and the freqs of B-X in the sentence
class_sent_map = defaultdict(list)
for i, sent in enumerate(dataset):
_, tags = sent.words, sent.labels
class_freqs = Counter()
for tag in tags:
if tag.startswith('B-'):
# we only store the `X` part of `B-X`
class_freqs[tag[2:]] += 1
for cls, freq in class_freqs.items():
sent_class_map[i].append((cls, freq))
class_sent_map[cls].append((i, freq))
return sent_class_map, class_sent_map
def tagged_labels(self, labels, tags):
t_labels = []
for lab in labels:
if len(lab) > 2: lab = lab[2:]
if lab not in tags:
t_labels.append(-1)
else:
t_labels.append(tags[lab])
return t_labels
def sample_batch_sentences(self, startIdx, batch_size):
sents = list(range(startIdx, startIdx + batch_size))
return MetaNERDataset(
[self.dataset[d].words for d in sents],
[self.tagged_labels(self.dataset[d].labels, self.tags) for d in sents],
len(self.classes)
)
| 37.307385 | 123 | 0.569311 | 15,934 | 0.852496 | 0 | 0 | 2,571 | 0.137553 | 0 | 0 | 4,493 | 0.240383 |
0fedad77ac88f1a6c75d6f7e75f5be3e0ab52f87 | 1,994 | py | Python | tests/test_testoot/conftest.py | sobolevn/testoot | bd1c19da6a232b1599836275c5026661a41e3c4a | [
"MIT"
] | 2 | 2020-04-19T13:48:32.000Z | 2020-05-02T17:43:55.000Z | tests/test_testoot/conftest.py | sobolevn/testoot | bd1c19da6a232b1599836275c5026661a41e3c4a | [
"MIT"
] | 14 | 2020-05-02T16:31:57.000Z | 2020-05-10T20:07:58.000Z | tests/test_testoot/conftest.py | aptakhin/regress | 83e07b2cd745f5f5dc733edbd126bedbb5b2abf3 | [
"MIT"
] | 1 | 2020-05-20T12:04:12.000Z | 2020-05-20T12:04:12.000Z | from typing import Optional
import pytest
from testoot.base import TestootContext, Comparator, TestootSerializer, \
FileType, TestootTestResult
from testoot.ext.pytest import PytestContext
from testoot.testoot import Testoot
from tests.conftest import AbcDiffResult
@pytest.fixture(scope='module')
def base_testoot(root_base_testoot):
testoot = root_base_testoot.clone(
storage=root_base_testoot.storage.clone(add_path='examples'),
)
testoot.storage.ensure_exists()
yield testoot
@pytest.fixture(scope='function')
def testoot(base_testoot, request):
testoot = Testoot(base_testoot, PytestContext(request))
yield testoot
class TrueComparator(Comparator):
@classmethod
def compare(cls, test_obj: any, canon_obj: any):
assert True
class FalseComparator(Comparator):
@classmethod
def compare(cls, test_obj: any, canon_obj: any):
assert False
class ContextTestoot(TestootContext):
def __init__(self, name, comparator: Optional[Comparator] = None,
serializer: Optional[TestootSerializer] = None,
ask_canonize: bool = False):
self._name = name
self._comparator = (TrueComparator() if comparator is None
else comparator)
self._serializer = serializer
self._ask_canonize = ask_canonize
def get_storage_name(self, file_type_hint: FileType,
suffix: Optional[str] = None):
return self._name
def get_storage_name_from_filename(self, filename: str):
return filename
def get_comparator(self) -> Optional[Comparator]:
return self._comparator
def get_serializer(self) -> Optional[TestootSerializer]:
return self._serializer
def ask_canonize(self) -> bool:
return self._ask_canonize
def create_test_result(self, test_obj: any, canon_obj: any,
exc: Exception) -> TestootTestResult:
return AbcDiffResult()
| 29.323529 | 73 | 0.69007 | 1,323 | 0.66349 | 318 | 0.159478 | 555 | 0.278335 | 0 | 0 | 28 | 0.014042 |
0fee2372d6f0f14e825a2cb46d667f12dbab810f | 1,109 | py | Python | peptidemapper/src/pepmapperapp/migrations/0002_mapperautocomplete.py | uvic-proteincentre/MRMAssayDB | 12b19a2064fe9b8006f6457500c9cb79b1b829ed | [
"Apache-2.0"
] | null | null | null | peptidemapper/src/pepmapperapp/migrations/0002_mapperautocomplete.py | uvic-proteincentre/MRMAssayDB | 12b19a2064fe9b8006f6457500c9cb79b1b829ed | [
"Apache-2.0"
] | null | null | null | peptidemapper/src/pepmapperapp/migrations/0002_mapperautocomplete.py | uvic-proteincentre/MRMAssayDB | 12b19a2064fe9b8006f6457500c9cb79b1b829ed | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pepmapperapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MapperAutoComplete',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uniprotacc', models.CharField(max_length=20)),
('prot_name', models.CharField(max_length=300)),
('gene', models.CharField(max_length=100)),
('organism', models.CharField(max_length=100)),
('pepseq', models.CharField(max_length=20)),
('path_name', models.CharField(max_length=1000)),
('dis_mut', models.CharField(max_length=1000)),
('go_id', models.CharField(max_length=100)),
('go_name', models.CharField(max_length=1000)),
('go_term', models.CharField(max_length=100)),
],
),
]
| 35.774194 | 114 | 0.574391 | 1,000 | 0.901713 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.154193 |
0fefc8f742a0c3f16da7678d4d39eaa122d3604d | 669 | py | Python | stream/utils.py | maddevsio/yourcast-web | c3e897f28dc16f71e4f625564270c0d3b72fa53f | [
"MIT"
] | 8 | 2017-05-12T10:08:03.000Z | 2020-12-22T00:01:39.000Z | stream/utils.py | maddevsio/yourcast-web | c3e897f28dc16f71e4f625564270c0d3b72fa53f | [
"MIT"
] | null | null | null | stream/utils.py | maddevsio/yourcast-web | c3e897f28dc16f71e4f625564270c0d3b72fa53f | [
"MIT"
] | 6 | 2017-05-12T13:35:43.000Z | 2021-08-09T13:43:31.000Z | # -*- coding: utf-8 -*-
import json
from random import shuffle
def serialize_stream(stream):
items = stream.youtube_links.split("\n")
if stream.play_random:
shuffle(items)
return {
"id": stream.pk,
"name": stream.name,
"slug": stream.slug,
"keywords": stream.keywords,
"plex_playlist_id": stream.plex_playlist_id,
"channels": stream.channels,
"update_frequency": stream.update_frequency,
"video_length": stream.video_length,
"is_news": stream.is_news,
"play_random": stream.play_random,
"links": [{
"url": link
} for link in items]
}
| 25.730769 | 52 | 0.593423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.219731 |
0ff35978def3b7c4ac27b01bea75d781f15a6451 | 1,753 | py | Python | laterpay/compat.py | laterpay/laterpay-client-python | c75e13408f6900202108f52e386e1c8c719e5377 | [
"MIT"
] | 3 | 2015-09-16T13:32:41.000Z | 2015-09-16T15:10:20.000Z | laterpay/compat.py | laterpay/laterpay-client-python | c75e13408f6900202108f52e386e1c8c719e5377 | [
"MIT"
] | 81 | 2015-02-05T07:05:50.000Z | 2020-06-02T11:27:24.000Z | laterpay/compat.py | laterpay/laterpay-client-python | c75e13408f6900202108f52e386e1c8c719e5377 | [
"MIT"
] | 1 | 2016-12-14T12:26:38.000Z | 2016-12-14T12:26:38.000Z | # -*- coding: utf-8 -*-
import warnings
import six
def encode_if_unicode(value, encoding='utf-8'): # pragma: no cover
"""
Encode and return a ``value`` using specified ``encoding``.
Encoding is done only if ``value`` is a ``unicode`` instance
(utf-8 encoding is used as default).
.. deprecated:: 5.0.0
Use :func:`laterpay.compat.stringify` instead.
"""
warnings.warn(
'laterpay.compat.encode_if_unicode is deprecated and will be removed '
'in future versions. Use laterpay.compat.stringify instead',
DeprecationWarning
)
if six.PY2 and isinstance(value, six.text_type):
value = value.encode(encoding)
return value
def stringify(value):
"""
Convert ``value`` into a native Python string.
If value is not a byte- or unicode-string the function calls ``str()`` on
it.
If the value then is a unicode string (on Python 2) or byte string (on
Python 3) the function converts it into the respective native string type
(byte string on Python 2; unicode string on Python 3).
In all other cases the value is returned as-is.
"""
if not isinstance(value, (six.string_types, six.binary_type)):
# If any non-string or non-bytes like objects, ``str()`` them.
value = str(value)
if six.PY3 and isinstance(value, six.binary_type):
# Issue #84, decode byte strings before using them on Python 3
value = value.decode()
elif six.PY2 and isinstance(value, six.text_type):
value = value.encode('utf-8')
return value
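# Illustrative behaviour: on Python 3, stringify(b'abc') == 'abc' and
# stringify(42) == '42'; on Python 2, stringify(u'abc') == b'abc'.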
def byteify(value):
"""
Convert ``value`` into a byte-string.
"""
if isinstance(value, six.text_type):
return value.encode('utf-8')
return value
| 29.216667 | 78 | 0.649173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,048 | 0.597832 |
0ff3a9bcd7a1323504d7ab075a98f49049806831 | 832 | py | Python | main.py | irahorecka/craigslist-housing-subscription | 389c325dc30526eaed4c2333f5dd4d60d7939a13 | [
"MIT"
] | null | null | null | main.py | irahorecka/craigslist-housing-subscription | 389c325dc30526eaed4c2333f5dd4d60d7939a13 | [
"MIT"
] | null | null | null | main.py | irahorecka/craigslist-housing-subscription | 389c325dc30526eaed4c2333f5dd4d60d7939a13 | [
"MIT"
] | null | null | null | """
Main file to find and send user Craigslist housing posts.
"""
import time
import users
import posts
import mail
def main():
"""Main app to execute subscription based email notifications."""
users_json = users.get_users()
# At start of subscription, drop all content and populate db without sending email
posts.drop_contents()
for _ in posts.get(users_json):
pass
while True:
        # Sleep for a day minus 80 seconds (86320 s) before fetching posts again
print("sleeping...")
time.sleep(86320)
# Get users' json file for every iteration - allows for updates during operation
users_json = users.get_users()
user_posts = zip(users_json, posts.get(users_json))
for user, post in user_posts:
mail.write_email(user, post)
if __name__ == "__main__":
main()
| 26 | 88 | 0.661058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.433894 |
0ff3aa2c46383feaa45de6cad4e80e2f67da3cda | 790 | py | Python | python/1469_Find_All_Lonely_Nodes.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | 17 | 2016-03-01T22:40:53.000Z | 2021-04-19T02:15:03.000Z | python/1469_Find_All_Lonely_Nodes.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | null | null | null | python/1469_Find_All_Lonely_Nodes.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | 3 | 2019-03-07T03:48:43.000Z | 2020-04-05T01:11:36.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def getLonelyNodes(self, root: TreeNode) -> List[int]:
lonely_nodes = []
def dfs(node, is_lonely):
if node is None:
return
if is_lonely:
lonely_nodes.append(node.val)
if (node.left is None) ^ (node.right is None):
is_lonely = True
else:
is_lonely = False
dfs(node.left, is_lonely)
dfs(node.right, is_lonely)
dfs(root, False)
return lonely_nodes | 28.214286 | 58 | 0.486076 | 598 | 0.756962 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.235443 |
0ff63aac37f72e1d8009c51dfe97ba6cd5e1b4db | 4,069 | py | Python | models/discrete_models/compression/compress_N_fast.py | openworm/behavioral_syntax | afc7c9866c23bb5d6ff05c8cddf5abec7f17a8d4 | [
"MIT"
] | 3 | 2017-08-15T19:33:13.000Z | 2021-03-06T16:37:15.000Z | models/discrete_models/compression/compress_N_fast.py | AidanRocke/behavioral_syntax_in_Python | afc7c9866c23bb5d6ff05c8cddf5abec7f17a8d4 | [
"MIT"
] | 31 | 2015-07-05T02:28:44.000Z | 2016-02-17T06:47:45.000Z | models/discrete_models/compression/compress_N_fast.py | AidanRocke/behavioral_syntax_in_Python | afc7c9866c23bb5d6ff05c8cddf5abec7f17a8d4 | [
"MIT"
] | 1 | 2016-02-19T09:40:16.000Z | 2016-02-19T09:40:16.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 21:55:56 2015
@author: aidanrocke
"""
import numpy as np
def compressSequenceNFast(posture_seq, newStart, nMax):
"""
# COMPRESSSEQUENCE Recursively finds the most compressive subsequence in
# posture_seq and creates and replaces it with a new number. This replacement
# creates a new rule in the grammar. Replacements are made until there are
# none left that lead to further compression. See the following paper
# for more details: Nevill-Manning and Witten (2000) On-Line and Off-Line
# Heuristics for Inferring Hierarchies of Repetitions in Sequences.
# Proceedings of the IEEE 88:1745.
#
# Input
# posture_seq - a list of posture sequences to be compressed
# newStart - this is the number that will be used to label the first new
# rule in the grammar. It must be greater than the maximum
# value in posture_seq. If empty, then max(posture_seq) + 1 is used.
# nMax - the maximum length n-gram to check for compression
#
# Output
# grammar - a number of rules by 2 cell array. The first column has the
# left hand side of each replacement rule while the second
# column has the right hand side (so the first column lists
# all non-terminals in the grammar).
# compVec - the vector that has been compressed using grammar. posture_seq
# can be recovered by applying the grammar rules in reverse.
# totSavings - the total space saving achieved during the compression,
# taking into account the size of the created grammar rules"""
# check posture_seq
if len(np.shape(posture_seq)) > 1:
raise ValueError('posture_seq must be a row vector.')
# define newStart if left empty
if newStart == 0:
newStart = max(posture_seq) + 1
# check that newStart is large enough
if newStart <= max(posture_seq):
raise ValueError('newStart must be greater than max(posture_seq).')
# initialise grammar
grammar = [[0,[0,0]]]
# initialise compVec and make a suffix array
compVec = posture_seq
totSavings = 0
# compress segments until none are found that lead to compression
sequence = [np.nan]
newInd = newStart
while len(sequence) > 0:
# find the most compressive sequence in posture_seq
[sequence, locations, savings] = compressiveNFast(compVec, nMax)
# update the total savings (i.e. compression)
totSavings = totSavings + savings
# add the rule to grammar
grammar.append([newInd,sequence])
# make the replacements. Note: strrep does not work here. For example
# if sequence is [44 68 44] and compVec has a subsequence that is
# [44 68 44 68 44 68 44 448], strrep will give [68 480 480 480 448]
# which is wrong.
for j in range(len(locations)):
compVec[locations[j]:locations[j] + len(sequence) - 1] = [newInd]+[np.nan]*(len(sequence)-1)
while compVec.count(np.nan) > 0:
compVec.remove(np.nan)
newInd += 1
# check that compressed lengths, savings, and grammar size are
# consistent
if len(sequence) > 0: # on last iteration last grammar entry is empty
            if len(compVec) + totSavings + len(grammar) + sum(len(rule[1]) for rule in grammar) != len(posture_seq):
                raise ValueError('Calculated savings not consistent with original and compressed lengths and grammar size.')
        else:
            if len(compVec) + totSavings + len(grammar) - 1 + sum(len(rule[1]) for rule in grammar) != len(posture_seq):
                raise ValueError('Calculated savings not consistent with original and compressed lengths and grammar size.')
    # drop the placeholder first entry and the last (empty) entry of the grammar
    return grammar[1:-1], compVec, totSavings
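# Illustrative call (compressiveNFast is assumed to be defined or imported
# alongside this function):
#   grammar, compVec, totSavings = compressSequenceNFast([1, 2, 3, 1, 2, 3, 4], 0, 4)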
| 42.385417 | 136 | 0.634062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,610 | 0.641435 |
0ff6b96f6cdfa0859ccdfa2be4df46f2d40daa71 | 2,631 | py | Python | hdmi/cores/transmitter/convert_30_to_15.py | srivatsan-ramesh/HDMI-Source-Sink-Modules | 00b99db3d50f9f88f74f0d1685cddcbe35ba1933 | [
"MIT"
] | 10 | 2016-05-08T11:41:40.000Z | 2021-11-16T08:28:06.000Z | hdmi/cores/transmitter/convert_30_to_15.py | srivatsan-ramesh/HDMI-Source-Sink-Modules | 00b99db3d50f9f88f74f0d1685cddcbe35ba1933 | [
"MIT"
] | 3 | 2016-05-18T17:36:14.000Z | 2016-06-23T17:42:37.000Z | hdmi/cores/transmitter/convert_30_to_15.py | srivatsan-ramesh/HDMI-Source-Sink-Modules | 00b99db3d50f9f88f74f0d1685cddcbe35ba1933 | [
"MIT"
] | 2 | 2016-05-21T13:53:20.000Z | 2016-06-21T22:05:42.000Z | from myhdl import Signal, intbv, always, always_comb, block, instances
from hdmi.cores.primitives import dram16xn
@block
def convert_30_to_15(reset, clock, clockx2, data_in, tmds_data2, tmds_data1, tmds_data0):
"""
The block converts the 30-bit data into 15-bit data.
Args:
reset: The reset signal
clock: The pixel clock
clockx2: The clock with twice the frequency of pixel clock
data_in: The input 30-bit data
tmds_data2: 5 bits of the output data (output[15:10])
tmds_data1: 5 bits of the output data (output[10:5])
tmds_data0: 5 bits of the output data (output[5:0])
Returns:
myhdl.instances() : A list of myhdl instances.
"""
# RAM Address
write_addr, _write_addr, read_addr, _read_addr = [Signal(intbv(0)[4:0]) for _ in range(4)]
data_int = Signal(intbv(0)[30:0])
@always(write_addr)
def case_wa():
if write_addr < 15:
_write_addr.next = write_addr + 1
else:
_write_addr.next = 0
@always(clock.posedge, reset.posedge)
def fdc():
if reset:
write_addr.next = 0
else:
write_addr.next = _write_addr
o_data_out = Signal(intbv(0)[30:0]) # Dummy variable
fifo_u = dram16xn(data_in, write_addr, read_addr, Signal(True), clock, o_data_out, data_int)
@always(read_addr)
def case_ra():
if read_addr < 15:
_read_addr.next = read_addr + 1
else:
_read_addr.next = 0
reset_sync, _reset_sync, reset_p = [Signal(bool(0)) for _ in range(3)]
sync = Signal(bool(0))
@always(clockx2.posedge, reset.posedge)
def fdp():
if reset:
reset_sync.next = 1
else:
reset_sync.next = reset
@always(clockx2.posedge)
def fdr():
if reset_p:
sync.next = 0
else:
sync.next = not sync
@always(clockx2.posedge)
def fdre():
if reset_p:
read_addr.next = 0
elif sync:
read_addr.next = _read_addr
db = Signal(intbv(0)[30:0])
@always(clockx2.posedge)
def fde():
if sync:
db.next = data_int
mux = Signal(intbv(0)[15:0])
@always_comb
def mux_logic():
if not sync:
mux.next = db[15:0]
else:
mux.next = db[30:15]
@always(clockx2.posedge)
def fd():
_reset_sync.next = reset_sync
reset_p.next = _reset_sync
tmds_data0.next = mux[5:0]
tmds_data1.next = mux[10:5]
tmds_data2.next = mux[15:10]
return instances()
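# Illustrative elaboration (signal names are hypothetical):
#   reset, clock, clockx2 = [Signal(bool(0)) for _ in range(3)]
#   data_in = Signal(intbv(0)[30:0])
#   d2, d1, d0 = [Signal(intbv(0)[5:0]) for _ in range(3)]
#   dut = convert_30_to_15(reset, clock, clockx2, data_in, d2, d1, d0)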
| 24.361111 | 96 | 0.581908 | 0 | 0 | 0 | 0 | 2,513 | 0.95515 | 0 | 0 | 531 | 0.201824 |
0ff6fb1c2b8d1844c202ef54247d1d380ce729a2 | 10,377 | py | Python | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_commands.py | slab14/metron | 52bd310fcce68dad15eead57f1113092a30d9791 | [
"Apache-2.0"
] | 1 | 2017-02-07T03:31:44.000Z | 2017-02-07T03:31:44.000Z | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_commands.py | slab14/metron | 52bd310fcce68dad15eead57f1113092a30d9791 | [
"Apache-2.0"
] | 2 | 2017-06-22T18:03:12.000Z | 2017-06-25T03:51:47.000Z | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_commands.py | slab14/metron | 52bd310fcce68dad15eead57f1113092a30d9791 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from datetime import datetime
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Directory, Execute, File
from resource_management.libraries.functions import get_user_call_output
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.show_logs import show_logs
import metron_service
from metron_security import kinit
# Wrap major operations and functionality in this class
class RestCommands:
__params = None
__kafka_configured = False
__kafka_acl_configured = False
__hbase_configured = False
__hbase_acl_configured = False
def __init__(self, params):
if params is None:
raise ValueError("params argument is required for initialization")
self.__params = params
self.__kafka_configured = os.path.isfile(self.__params.rest_kafka_configured_flag_file)
self.__kafka_acl_configured = os.path.isfile(self.__params.rest_kafka_acl_configured_flag_file)
self.__hbase_configured = os.path.isfile(self.__params.rest_hbase_configured_flag_file)
self.__hbase_acl_configured = os.path.isfile(self.__params.rest_hbase_acl_configured_flag_file)
Directory(params.metron_rest_pid_dir,
mode=0755,
owner=params.metron_user,
group=params.metron_group,
create_parents=True
)
Directory(params.metron_log_dir,
mode=0755,
owner=params.metron_user,
group=params.metron_group,
create_parents=True
)
def __get_topics(self):
return [self.__params.metron_escalation_topic]
def is_kafka_configured(self):
return self.__kafka_configured
def is_kafka_acl_configured(self):
return self.__kafka_acl_configured
def is_hbase_configured(self):
return self.__hbase_configured
def is_hbase_acl_configured(self):
return self.__hbase_acl_configured
def set_kafka_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.rest_kafka_configured_flag_file, "Setting Kafka configured to True for rest")
def set_kafka_acl_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.rest_kafka_acl_configured_flag_file, "Setting Kafka ACL configured to True for rest")
def set_hbase_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.rest_hbase_configured_flag_file, "Setting HBase configured to True for rest")
def set_hbase_acl_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.rest_hbase_acl_configured_flag_file, "Setting HBase ACL configured to True for rest")
def init_kafka_topics(self):
Logger.info('Creating Kafka topics for rest')
metron_service.init_kafka_topics(self.__params, self.__get_topics())
def init_kafka_acls(self):
Logger.info('Creating Kafka ACLs for rest')
# The following topics must be permissioned for the rest application list operation
topics = self.__get_topics() + [self.__params.ambari_kafka_service_check_topic, self.__params.consumer_offsets_topic]
metron_service.init_kafka_acls(self.__params, topics)
groups = ['metron-rest']
metron_service.init_kafka_acl_groups(self.__params, groups)
def start_rest_application(self):
"""
Start the REST application
"""
Logger.info('Starting REST application')
if self.__params.security_enabled:
kinit(self.__params.kinit_path_local,
self.__params.metron_keytab_path,
self.__params.metron_principal_name,
execute_user=self.__params.metron_user)
# Get the PID associated with the service
pid_file = format("{metron_rest_pid_dir}/{metron_rest_pid}")
pid = get_user_call_output.get_user_call_output(format("cat {pid_file}"), user=self.__params.metron_user, is_checked_call=False)[1]
process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
# Set the password with env variable instead of param to avoid it showing in ps
cmd = format((
"export METRON_JDBC_PASSWORD={metron_jdbc_password!p};"
"export JAVA_HOME={java_home};"
"export METRON_REST_CLASSPATH={metron_rest_classpath};"
"export METRON_INDEX_CP={metron_indexing_classpath};"
"export METRON_LOG_DIR={metron_log_dir};"
"export METRON_PID_FILE={pid_file};"
"export METRON_RA_INDEXING_WRITER={ra_indexing_writer};"
"{metron_home}/bin/metron-rest.sh;"
"unset METRON_JDBC_PASSWORD;"
))
Execute(cmd,
user = self.__params.metron_user,
logoutput=True,
not_if = process_id_exists_command,
timeout=60)
Logger.info('Done starting REST application')
def stop_rest_application(self):
"""
Stop the REST application
"""
Logger.info('Stopping REST application')
# Get the pid associated with the service
pid_file = format("{metron_rest_pid_dir}/{metron_rest_pid}")
pid = get_user_call_output.get_user_call_output(format("cat {pid_file}"), user=self.__params.metron_user, is_checked_call=False)[1]
process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
if self.__params.security_enabled:
kinit(self.__params.kinit_path_local,
self.__params.metron_keytab_path,
self.__params.metron_principal_name,
execute_user=self.__params.metron_user)
# Politely kill
kill_cmd = ('kill', format("{pid}"))
Execute(kill_cmd,
sudo=True,
not_if = format("! ({process_id_exists_command})")
)
# Violently kill
hard_kill_cmd = ('kill', '-9', format("{pid}"))
wait_time = 5
Execute(hard_kill_cmd,
not_if = format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
sudo=True,
ignore_failures = True
)
try:
# check if stopped the process, else fail the task
Execute(format("! ({process_id_exists_command})"),
tries=20,
try_sleep=3,
)
except:
show_logs(self.__params.metron_log_dir, self.__params.metron_user)
raise
File(pid_file, action = "delete")
Logger.info('Done stopping REST application')
def restart_rest_application(self, env):
"""
Restart the REST application
:param env: Environment
"""
Logger.info('Restarting the REST application')
self.stop_rest_application()
self.start_rest_application()
Logger.info('Done restarting the REST application')
def status_rest_application(self, env):
"""
Performs a status check for the REST application
:param env: Environment
"""
Logger.info('Status check the REST application')
metron_service.check_http(
self.__params.metron_rest_host,
self.__params.metron_rest_port,
self.__params.metron_user)
def create_hbase_tables(self):
Logger.info("Creating HBase Tables")
metron_service.create_hbase_table(self.__params,
self.__params.user_settings_hbase_table,
self.__params.user_settings_hbase_cf)
Logger.info("Done creating HBase Tables")
self.set_hbase_configured()
def set_hbase_acls(self):
Logger.info("Setting HBase ACLs")
if self.__params.security_enabled:
kinit(self.__params.kinit_path_local,
self.__params.hbase_keytab_path,
self.__params.hbase_principal_name,
execute_user=self.__params.hbase_user)
cmd = "echo \"grant '{0}', 'RW', '{1}'\" | hbase shell -n"
add_rest_acl_cmd = cmd.format(self.__params.metron_user, self.__params.user_settings_hbase_table)
Execute(add_rest_acl_cmd,
tries=3,
try_sleep=5,
logoutput=False,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
user=self.__params.hbase_user
)
Logger.info("Done setting HBase ACLs")
self.set_hbase_acl_configured()
def service_check(self, env):
"""
Performs a service check for the REST application
:param env: Environment
"""
Logger.info('Checking connectivity to REST application')
metron_service.check_http(
self.__params.metron_rest_host,
self.__params.metron_rest_port,
self.__params.metron_user)
Logger.info('Checking Kafka topics for the REST application')
metron_service.check_kafka_topics(self.__params, self.__get_topics())
if self.__params.security_enabled:
Logger.info('Checking Kafka topic ACL for the REST application')
metron_service.check_kafka_acls(self.__params, self.__get_topics())
Logger.info("REST application service check completed successfully")
| 40.694118 | 164 | 0.668979 | 9,092 | 0.876168 | 0 | 0 | 0 | 0 | 0 | 0 | 3,301 | 0.318107 |
0ff7544e0c718c0471e071d248d68513094b0c4f | 1,358 | py | Python | alipay/aop/api/response/AlipayPcreditHuabeiGoodsCategoryQueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayPcreditHuabeiGoodsCategoryQueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayPcreditHuabeiGoodsCategoryQueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.GoodsCategoryResult import GoodsCategoryResult
class AlipayPcreditHuabeiGoodsCategoryQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayPcreditHuabeiGoodsCategoryQueryResponse, self).__init__()
self._categorys = None
self._success = None
@property
def categorys(self):
return self._categorys
@categorys.setter
def categorys(self, value):
if isinstance(value, list):
self._categorys = list()
for i in value:
if isinstance(i, GoodsCategoryResult):
self._categorys.append(i)
else:
self._categorys.append(GoodsCategoryResult.from_alipay_dict(i))
@property
def success(self):
return self._success
@success.setter
def success(self, value):
self._success = value
def parse_response_content(self, response_content):
response = super(AlipayPcreditHuabeiGoodsCategoryQueryResponse, self).parse_response_content(response_content)
if 'categorys' in response:
self.categorys = response['categorys']
if 'success' in response:
self.success = response['success']
| 31.581395 | 118 | 0.667894 | 1,156 | 0.851252 | 0 | 0 | 558 | 0.410898 | 0 | 0 | 84 | 0.061856 |
0ff7fbbc548ae8f478c6c4aede185d3a7e7ad3c3 | 4,740 | py | Python | gadk/cli.py | stucox/gadk | 6144619d78608df0559e03ee5eab76500fb9b496 | [
"MIT"
] | 1 | 2020-08-13T17:34:02.000Z | 2020-08-13T17:34:02.000Z | gadk/cli.py | stucox/gadk | 6144619d78608df0559e03ee5eab76500fb9b496 | [
"MIT"
] | 2 | 2020-05-04T14:11:02.000Z | 2022-03-25T12:00:00.000Z | gadk/cli.py | stucox/gadk | 6144619d78608df0559e03ee5eab76500fb9b496 | [
"MIT"
] | 1 | 2022-03-25T11:31:59.000Z | 2022-03-25T11:31:59.000Z | import importlib
import inspect
from os import getcwd, makedirs
from os.path import exists
from typing import List, Set, Optional
import click
import sys
from gadk import Workflow
def output_to_file(workflow: Workflow):
"""Write the workflow to .github/workflows/{workflow.filename}.yml."""
makedirs(".github/workflows/", exist_ok=True)
with open(f".github/workflows/{workflow.filename}.yml", mode="w") as fd:
fd.write(workflow.render())
def output_to_stdout(workflow: Workflow):
click.echo(workflow.render())
def find_workflows() -> List[Workflow]:
"""
Extract workflows from imported module.
Typing is mostly disabled for this function because:
1. Workflow subclasses are expected. If they are not present, `gadk` can do no work.
2. Workflow subclasses should define a constructor with no arguments. The arguments
exist for the programmer to name the workflow. `gadk` cannot guess these arguments.
"""
def _find_workflows(subclasses: List, workflows: Set) -> Set:
"""
Recursive function to find workflows by descending a class hierarchy of abstract workflows.
Simple workflows will return immediately. More complex projects might recurse once or twice.
"""
child_workflows = []
for workflow_class in subclasses:
# Add subclasses of abstract subclasses. This allows for further abstractions of workflows.
if inspect.isabstract(workflow_class):
child_workflows += [
child_workflow
for child_workflow in workflow_class.__subclasses__()
if child_workflow not in workflows
]
else:
workflows.add(workflow_class)
if child_workflows:
return _find_workflows(child_workflows, workflows)
return workflows
# Collect the workflows detected in the module. There may be abstractions of workflows,
# so we'll look at subclasses of subclasses, and so on, if necessary.
workflows = _find_workflows(Workflow.__subclasses__(), set())
# Filter out those abstract workflows. Only concrete workflows should be returned.
return [
workflow_class()
for workflow_class in workflows
if not inspect.isabstract(workflow_class)
]
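# e.g. (illustrative) if actions.py defines `class Base(Workflow, ABC)` and
# `class CiWorkflow(Base)`, find_workflows() returns [CiWorkflow()] and skips
# the abstract Base.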
def import_workflows():
# Import actions.py from the current working directory.
sys.path.insert(0, getcwd())
importlib.import_module("actions")
sys.path.pop(0)
# Sort workflows for consistency.
return sorted(find_workflows(), key=lambda w: w.name)
def fetch_actual_workflow_contents(workflow_name: str) -> Optional[str]:
workflow_path = f".github/workflows/{workflow_name}.yml"
if not exists(workflow_path):
return None
    else:
        with open(workflow_path) as fd:
            return fd.read()
def _sync(print_to_stdout: bool):
# Determine output per workflow.
outputter = output_to_stdout if print_to_stdout else output_to_file
# Assume actions.py imports all elements of gadk to get subclasses of Workflow.
workflows = import_workflows()
for workflow in workflows:
outputter(workflow)
@click.group(
invoke_without_command=True,
context_settings={"help_option_names": ["-h", "--help"]},
)
@click.pass_context
@click.option(
"--print/--no-print",
default=False,
help="Print workflow YAML to stdout. By default each workflow is written to .github/workflows/.",
)
@click.version_option()
def cmd(ctx: click.Context, print: bool = False):
"""Generate Github Actions workflows from code."""
if ctx.invoked_subcommand is None:
_sync(print)
@cmd.command()
@click.option(
"--print/--no-print",
default=False,
help="Print workflow YAML to stdout. By default each workflow is written to .github/workflows/.",
)
def sync(print: bool):
"""Generate Github Actions workflows from code."""
_sync(print)
@cmd.command()
def check():
"""Check if generated workflow files are up to date."""
success = True
for workflow in import_workflows():
actual_content = fetch_actual_workflow_contents(workflow.filename)
if actual_content is None or actual_content != workflow.render():
click.echo(
click.style(f"Workflow {workflow.filename} is outdated!", fg="red")
)
success = False
else:
click.echo(f"Workflow {workflow.filename} is up to date.")
if not success:
raise click.exceptions.ClickException(
"Some workflows are outdated. Please run gadk to sync workflows."
)
if __name__ == "__main__":
cmd()
| 32.027027 | 103 | 0.674895 | 0 | 0 | 0 | 0 | 1,442 | 0.304219 | 0 | 0 | 1,942 | 0.409705 |
0ff91e3fafb80c50fca16bcdb41b0cd40242dff6 | 1,373 | py | Python | hummingbot/cli/utils/symbol_splitter.py | mitakash/hummingbot | 58c2bc421f5da9cef472fea473f9b5273466b11c | [
"Apache-2.0"
] | null | null | null | hummingbot/cli/utils/symbol_splitter.py | mitakash/hummingbot | 58c2bc421f5da9cef472fea473f9b5273466b11c | [
"Apache-2.0"
] | null | null | null | hummingbot/cli/utils/symbol_splitter.py | mitakash/hummingbot | 58c2bc421f5da9cef472fea473f9b5273466b11c | [
"Apache-2.0"
] | null | null | null | import re
from typing import Tuple
BINANCE_SYMBOL_SPLITTER = re.compile(r"^(\w+)(BTC|ETH|BNB|XRP|USDT|USDC|TUSD|PAX)$")
class SymbolSplitter:
def __init__(self, market: str, symbol: str):
self._symbol: Tuple[str, str] = self.split(market, symbol)
@property
def base_asset(self):
return self._symbol[0]
@property
def quote_asset(self):
return self._symbol[1]
@staticmethod
def split(market, symbol) -> Tuple[str, str]:
"""
Takes an exchange pair and return
:param market: lowercase market e.g. binance
:param symbol: uppercase exchange pair e.g. ETHUSDT
:return: tuple: (base_asset, quote_asset)
"""
try:
if market == "binance":
m = BINANCE_SYMBOL_SPLITTER.match(symbol)
result: Tuple = (m.group(1), m.group(2))
elif market in ["ddex", "radar_relay", "coinbase_pro"]:
result: Tuple = tuple(symbol.split('-'))
else:
raise ValueError("Market %s not supported" % (market,))
except Exception:
raise ValueError("Error parsing %s symbol. Symbol %s is not a valid %s symbol" % (market, symbol, market))
if len(result) != 2:
raise ValueError("Symbol %s does not match %s's format" % (symbol, market))
return result
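# e.g. SymbolSplitter.split("binance", "ETHUSDT") == ("ETH", "USDT") and
# SymbolSplitter("ddex", "WETH-DAI").quote_asset == "DAI"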
| 30.511111 | 118 | 0.589221 | 1,246 | 0.907502 | 0 | 0 | 1,090 | 0.793882 | 0 | 0 | 435 | 0.316824 |
0ff9a9bae36874fc3a41a605b6109b76794154d0 | 991 | py | Python | headerstest.py | mzeinstra/openAnalyser | 859156117948eb15283c348e6f6025cae9352279 | [
"MIT"
] | 1 | 2021-06-28T09:39:43.000Z | 2021-06-28T09:39:43.000Z | headerstest.py | mzeinstra/openAnalyser | 859156117948eb15283c348e6f6025cae9352279 | [
"MIT"
] | null | null | null | headerstest.py | mzeinstra/openAnalyser | 859156117948eb15283c348e6f6025cae9352279 | [
"MIT"
] | null | null | null | from urllib.parse import urlparse
from bs4 import BeautifulSoup
import urllib3
from socket import timeout
import tldextract
import re
import traceback
import sys
import logging
import socket
import threading
from time import sleep
from collector import Collector
from checker import Checker
http = urllib3.PoolManager()
page = http.request('GET', "http://opennederland.nl", timeout=2)
print(page)
print("-------------------------------------------------------")
print(page.headers)
print("-------------------------------------------------------")
print(page.headers.keys())
print("-------------------------------------------------------")
print(page.headers.items())
print("-------------------------------------------------------")
t = page.headers.items()
print("-------------------------------------------------------")
d = dict(t)
print (d)
print("-------------------------------------------------------")
if "X-Powered-By" in d:
print ("print " + d['X-Powered-By']) | 30.030303 | 64 | 0.491423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 408 | 0.411705 |
0ff9f9952934c592807600d700c67ca369be77e0 | 2,074 | py | Python | counterfit/core/attacks.py | Mandroide/counterfit | 3252588d45514192edd4444b3bff0bf006f92bf0 | [
"MIT"
] | null | null | null | counterfit/core/attacks.py | Mandroide/counterfit | 3252588d45514192edd4444b3bff0bf006f92bf0 | [
"MIT"
] | null | null | null | counterfit/core/attacks.py | Mandroide/counterfit | 3252588d45514192edd4444b3bff0bf006f92bf0 | [
"MIT"
] | null | null | null | import datetime
import uuid
import numpy as np
from hyperopt import pyll
from counterfit.core import enums
from counterfit.core.interfaces import AbstractAttack
class Attack(AbstractAttack):
"""Attack class inherits AbstractAttack and sets attack parameters. It also persists attack results and logs to json configuration file"""
def __init__(self, status=enums.AttackStatus.pending.value):
self.attack_id = uuid.uuid4().hex[:8]
self.status = status
# Set in the target
self.samples = None
self.sample_index = None
self.target_class = None
# Logs
self._logs = []
# Results of the attack
self.results = []
# set creation time
self.created_time = datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT")
def set_attack_parameters(self, parameters):
if parameters == "default":
self.parameters = self.default
elif parameters == "random":
self.parameters = self._param_floats_to_ints(pyll.stochastic.sample(self.random))
        elif isinstance(parameters, dict):
self.parameters = self._param_floats_to_ints(parameters)
else:
print("\n[!] Parameters arguement not understood. Setting default.")
self.parameters = self.default
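    # Illustrative calls: set_attack_parameters("default") keeps self.default,
    # "random" draws a sample from the hyperopt space in self.random, and a
    # dict sets the parameters explicitly (integral floats become ints).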
def _param_floats_to_ints(self, parameters):
new_parameters = {}
for k, v in parameters.items():
if isinstance(v, float) and v < np.inf:
if int(v) == v:
v = int(v)
new_parameters[k] = v
return new_parameters
def dump(self):
return {
"attack_name": self.attack_name,
"created": self.created_time,
"attack_id": self.attack_id,
"sample_index": self.sample_index,
"target_class": self.target_class,
"parameters": self.parameters,
"results": self.results,
"logs": self._logs,
}
def append_log(self, log):
self._logs.append(log)
| 30.057971 | 142 | 0.60704 | 1,909 | 0.920444 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.1919 |
0ffa165f3d1ac05d9baa3f4e414d5d4b7ae714b1 | 4,909 | py | Python | electronics/scripts/generate_svg.py | chrisdearman/splitflap | 3a67ecf99119994f7e678ca0eac94586ba79326e | [
"Apache-2.0"
] | 2,138 | 2015-10-05T18:39:40.000Z | 2022-03-31T19:16:08.000Z | electronics/scripts/generate_svg.py | chrisdearman/splitflap | 3a67ecf99119994f7e678ca0eac94586ba79326e | [
"Apache-2.0"
] | 136 | 2015-11-08T02:22:51.000Z | 2022-03-24T23:57:14.000Z | electronics/scripts/generate_svg.py | chrisdearman/splitflap | 3a67ecf99119994f7e678ca0eac94586ba79326e | [
"Apache-2.0"
] | 175 | 2016-08-07T21:20:41.000Z | 2022-03-31T03:58:37.000Z | #!/usr/bin/env python3
# Copyright 2015-2016 Scott Bezek and the splitflap contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import pcbnew
import shutil
import subprocess
import pcb_util
from svg_processor import SvgProcessor
electronics_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def color_with_alpha(base_color, alpha):
return (base_color & ~(0xFF << 24)) | ((alpha & 0xFF) << 24)
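# e.g. color_with_alpha(0x112233, 0x80) == 0x80112233 (alpha in the top byte)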
def run(pcb_file):
output_directory = os.path.join(electronics_root, 'build')
temp_dir = os.path.join(output_directory, 'temp_layers')
shutil.rmtree(temp_dir, ignore_errors=True)
try:
os.makedirs(temp_dir)
plot_to_directory(pcb_file, output_directory, temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def plot_to_directory(pcb_file, output_directory, temp_dir):
board_name = os.path.splitext(os.path.basename(pcb_file))[0]
layers = [
{
'layer': pcbnew.B_SilkS,
'color': '#CC00CC',
'alpha': 0.8,
},
{
'layer': pcbnew.B_Cu,
'color': '#33EE33',
'alpha': 0.5,
},
{
'layer': pcbnew.F_Cu,
'color': '#CC0000',
'alpha': 0.5,
},
{
'layer': pcbnew.F_SilkS,
'color': '#00CCCC',
'alpha': 0.8,
},
{
'layer': pcbnew.Cmts_User,
'color': '#333333',
'alpha': 0.8,
},
{
'layer': pcbnew.Edge_Cuts,
'color': '#3333CC',
'alpha': 0.8,
},
]
with pcb_util.get_plotter(pcb_file, temp_dir) as plotter:
plotter.plot_options.SetExcludeEdgeLayer(True)
processed_svg_files = []
for i, layer in enumerate(layers):
output_filename = plotter.plot(layer['layer'], pcbnew.PLOT_FORMAT_SVG)
logger.info('Post-processing %s...', output_filename)
processor = SvgProcessor(output_filename)
def colorize(original):
if original.lower() == '#000000':
return layer['color']
return original
processor.apply_color_transform(colorize)
processor.wrap_with_group({
'opacity': str(layer['alpha']),
})
output_filename2 = os.path.join(temp_dir, 'processed-' + os.path.basename(output_filename))
processor.write(output_filename2)
processed_svg_files.append((output_filename2, processor))
# Plot the paste layer to its own SVG
logger.info('Plotting paste SVG')
output_filename = plotter.plot(pcbnew.F_Paste, pcbnew.PLOT_FORMAT_SVG)
processor = SvgProcessor(output_filename)
def colorize(original):
if original.lower() == '#000000':
return '#FF0000'
return original
processor.apply_group_style_transforms({
'fill-opacity': lambda _: '0',
'stroke': lambda _: '#FF0000',
'stroke-opacity': lambda _: '1',
'stroke-width': lambda _: '20',
})
paste_filename = os.path.join(output_directory, '%s_paste.svg' % board_name)
processor.write(paste_filename)
logger.info('Merging layers...')
final_svg = os.path.join(output_directory, '%s_merged.svg' % board_name)
shutil.copyfile(processed_svg_files[0][0], final_svg)
output_processor = SvgProcessor(final_svg)
for _, processor in processed_svg_files:
output_processor.import_groups(processor)
output_processor.write(final_svg)
logger.info('Rasterizing...')
raster_width = 1280
final_png = os.path.join(output_directory, '%s_merged.png' % board_name)
subprocess.check_call([
'inkscape',
'--export-area-drawing',
'--export-width', str(raster_width),
'--export-png', final_png,
'--export-background', '#FFFFFF',
final_svg,
])
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate an SVG rendering of the PCB')
parser.add_argument('pcb_file')
args = parser.parse_args()
run(args.pcb_file)
| 33.394558 | 103 | 0.608067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,290 | 0.262783 |
0ffa8f71aabd0e5b29a56e626be5c5dbaf416686 | 335 | py | Python | flloat/__version__.py | aadeshnpn/flloat | 5a84608400d401799421f872e561689e3159a513 | [
"MIT"
] | null | null | null | flloat/__version__.py | aadeshnpn/flloat | 5a84608400d401799421f872e561689e3159a513 | [
"MIT"
] | null | null | null | flloat/__version__.py | aadeshnpn/flloat | 5a84608400d401799421f872e561689e3159a513 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__title__ = "flloat"
__description__ = "A Python implementation of the FLLOAT library."
__url__ = "https://github.com/marcofavorito/flloat.git"
__version__ = "1.0.0a0"
__author__ = "Marco Favorito"
__author_email__ = "marco.favorito@gmail.com"
__license__ = "MIT license"
__copyright__ = "2019 Marco Favorito"
| 30.454545 | 66 | 0.743284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.623881 |
0ffb6d131a75f4d5e786c2e48eaefbaf6ae88520 | 326 | py | Python | app/blog/blog_entries/migrations/0002_remove_article_for_adult.py | Risoko/DRF-Auth-With-Blog-Entries | ba903d1eba1d1f774bfa3782d51d292430d84dbd | [
"MIT"
] | null | null | null | app/blog/blog_entries/migrations/0002_remove_article_for_adult.py | Risoko/DRF-Auth-With-Blog-Entries | ba903d1eba1d1f774bfa3782d51d292430d84dbd | [
"MIT"
] | null | null | null | app/blog/blog_entries/migrations/0002_remove_article_for_adult.py | Risoko/DRF-Auth-With-Blog-Entries | ba903d1eba1d1f774bfa3782d51d292430d84dbd | [
"MIT"
] | null | null | null | # Generated by Django 3.0.1 on 2020-01-08 08:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog_entries', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='for_adult',
),
]
| 18.111111 | 47 | 0.588957 | 241 | 0.739264 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.291411 |
0ffb9c15a6273824c51ed946ffd5452b3519ceb0 | 380 | py | Python | leetcode/easy/rotated-digits.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 8 | 2019-05-14T12:50:29.000Z | 2022-03-01T09:08:27.000Z | leetcode/easy/rotated-digits.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 46 | 2019-03-24T20:59:29.000Z | 2019-04-09T16:28:43.000Z | leetcode/easy/rotated-digits.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 1 | 2022-01-28T12:46:29.000Z | 2022-01-28T12:46:29.000Z | class Solution:
def rotatedDigits(self, N: 'int') -> 'int':
result = 0
for nr in range(1, N + 1):
ok = False
for digit in str(nr):
if digit in '347':
break
if digit in '6952':
ok = True
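            # for/else: the else below runs only when the inner loop finishes
            # without `break`, i.e. when nr contains no 3, 4 or 7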
else:
result += int(ok)
return result
| 21.111111 | 47 | 0.386842 | 379 | 0.997368 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.055263 |
0ffe19b3916428633aaa555941af73dfa1704c82 | 1,699 | py | Python | bis/apps/incubator/models/stages.py | AgustinMachiavello/business-incubation-system | 983e1308697771570891568f99d1b8ba74441d32 | [
"MIT"
] | 2 | 2021-03-03T16:16:42.000Z | 2021-03-08T22:43:10.000Z | bis/apps/incubator/models/stages.py | AgustinMachiavello/business-incubation-system | 983e1308697771570891568f99d1b8ba74441d32 | [
"MIT"
] | null | null | null | bis/apps/incubator/models/stages.py | AgustinMachiavello/business-incubation-system | 983e1308697771570891568f99d1b8ba74441d32 | [
"MIT"
] | null | null | null | """
Stage model
"""
# Models
from django.db import models
from .timeStampMixin import TimeStampMixin
from ..models.projects import Project
# Date and time
from django.utils import timezone
class Stage(TimeStampMixin, models.Model):
STAGE_TYPE_CHOICES = [
('EP', 'En Postulación'),
('EV', 'Evaluación Incubadora/Capital Semilla'),
('PI', 'Pre-Incubación'),
('IN', 'Incubación'),
('PO', 'Post-Incubación'),
]
stage_type = models.CharField(null=False, blank=False, choices=STAGE_TYPE_CHOICES, max_length=2)
description = models.CharField(null=False, blank=False, max_length=1000)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
# time_spent_interviews = models.FloatField(null=True, blank=True, editable=False)
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='stage_project')
@property
def get_time_spent(self):
# Returns all the time spent on interviews for this stage (in hours)
hours = 0
for i in self.interview_stage.all():
hours += i.get_time_spent
return hours
def __str__(self):
return '#{0} · {1} · {2}'.format(self.id, self.project.name, self.get_stage_type_display())
def save(self, *args, **kwargs):
# self.time_spent_interviews = self.get_time_spent
super(Stage, self).save(*args, **kwargs)
class Meta:
verbose_name = 'Stage'
constraints = [
models.UniqueConstraint(
fields=['project', 'stage_type'], name="const_project_stage")
]
ordering = ['-created_at', '-id']
| 32.673077 | 100 | 0.648028 | 1,512 | 0.886284 | 0 | 0 | 238 | 0.139508 | 0 | 0 | 469 | 0.274912 |
0ffecfc820d549bb4fa3335755a6e86a03bd358c | 2,584 | py | Python | src/process_particle_sys.py | faycalki/tainted-paths | 81cecf6c1fba903ec3b8043e22652d222892609d | [
"MIT"
] | 4 | 2019-09-26T21:34:32.000Z | 2021-11-18T19:31:15.000Z | src/process_particle_sys.py | faycalki/tainted-paths | 81cecf6c1fba903ec3b8043e22652d222892609d | [
"MIT"
] | null | null | null | src/process_particle_sys.py | faycalki/tainted-paths | 81cecf6c1fba903ec3b8043e22652d222892609d | [
"MIT"
] | null | null | null | import sys
sys.dont_write_bytecode = True
from module_info import *
from module_particle_systems import *
from process_common import *
# Lav's export_dir tweak
export_dir = '%s/' % export_dir.replace('\\', '/').rstrip('/')
id_pos = 0
flags_pos = 1
mesh_name_pos = 2
num_particles_pos = 3
life_pos = 4
damping_pos = 5
gravity_pos = 6
turb_size_pos = 7
turb_wt_pos = 8
alpha_key_pos = 9
red_key_pos = alpha_key_pos + 2
green_key_pos = red_key_pos + 2
blue_key_pos = green_key_pos + 2
scale_key_pos = blue_key_pos + 2
emit_box_size_pos = scale_key_pos + 2
emit_velocity_pos = emit_box_size_pos + 1
emit_rndmness_pos = emit_velocity_pos + 1
angular_speed_pos = emit_rndmness_pos + 1
angular_damping_pos = angular_speed_pos + 1
def save_psys_keys(ofile, keys1, keys2):
ofile.write("%f %f %f %f\n"%(keys1[0], keys1[1], keys2[0], keys2[1]))
def save_particle_systems():
ofile = open(export_dir + "particle_systems.txt","w")
ofile.write("particle_systemsfile version 1\n")
ofile.write("%d\n"%len(particle_systems))
for psys in particle_systems:
ofile.write("psys_%s %d %s "%(psys[0], psys[1], psys[2]))
ofile.write("%d %f %f %f %f %f \n"%(psys[num_particles_pos], psys[life_pos], psys[damping_pos], psys[gravity_pos], psys[turb_size_pos], psys[turb_wt_pos]))
save_psys_keys(ofile,psys[alpha_key_pos],psys[alpha_key_pos+1])
save_psys_keys(ofile,psys[red_key_pos],psys[red_key_pos+1])
save_psys_keys(ofile,psys[green_key_pos],psys[green_key_pos+1])
save_psys_keys(ofile,psys[blue_key_pos],psys[blue_key_pos+1])
save_psys_keys(ofile,psys[scale_key_pos],psys[scale_key_pos+1])
ofile.write("%f %f %f "%(psys[emit_box_size_pos][0],psys[emit_box_size_pos][1],psys[emit_box_size_pos][2]))
ofile.write("%f %f %f "%(psys[emit_velocity_pos][0],psys[emit_velocity_pos][1],psys[emit_velocity_pos][2]))
ofile.write("%f \n"%(psys[emit_rndmness_pos]))
if (len(psys) >= (angular_speed_pos + 1)):
ofile.write("%f "%(psys[angular_speed_pos]))
else:
ofile.write("0.0 ")
if (len(psys) >= (angular_damping_pos + 1)):
ofile.write("%f "%(psys[angular_damping_pos]))
else:
ofile.write("0.0 ")
ofile.write("\n")
ofile.close()
def save_python_header():
ofile = open("./ID_particle_systems.py","w")
for i_particle_system in xrange(len(particle_systems)):
ofile.write("psys_%s = %d\n"%(particle_systems[i_particle_system][0],i_particle_system))
ofile.close()
print "Exporting particle data..."
save_particle_systems()
save_python_header()
| 37.449275 | 159 | 0.700464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.113003 |
0fff06c8d1de1e3a853ccb96506d8813c421c072 | 498 | py | Python | local_configs/write_config.py | wzpscott/SegformerDistillation | 6558757f5071251410e90270e197755860a6f41c | [
"DOC"
] | null | null | null | local_configs/write_config.py | wzpscott/SegformerDistillation | 6558757f5071251410e90270e197755860a6f41c | [
"DOC"
] | null | null | null | local_configs/write_config.py | wzpscott/SegformerDistillation | 6558757f5071251410e90270e197755860a6f41c | [
"DOC"
] | null | null | null | import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config_path')
parser.add_argument('sh_path', nargs='?', default='train_10.24.sh')
args = parser.parse_args()
config_path = args.config_path
sh_path = args.sh_path
with open(f'/home/mist/SegformerDistillation/local_configs/{sh_path}','w') as f:
for c in os.listdir(config_path):
if 'example' not in c:
command = f'bash tools/dist_train.sh {config_path}{c} 8;\n'
f.write(command)
print('done') | 33.2 | 80 | 0.712851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.329317 |
ba002eceb241d614be5bd495796015ba35b2c230 | 4,297 | py | Python | boris/reporting/reports/clients.py | fragaria/BorIS | 9585c83f29220d8f63910dabd98641ab41ace6cf | [
"MIT"
] | 1 | 2021-08-10T14:01:26.000Z | 2021-08-10T14:01:26.000Z | boris/reporting/reports/clients.py | fragaria/BorIS | 9585c83f29220d8f63910dabd98641ab41ace6cf | [
"MIT"
] | 5 | 2018-04-04T14:31:34.000Z | 2020-06-08T07:50:23.000Z | boris/reporting/reports/clients.py | fragaria/BorIS | 9585c83f29220d8f63910dabd98641ab41ace6cf | [
"MIT"
] | 4 | 2017-02-06T15:38:34.000Z | 2018-03-21T09:40:12.000Z | # -*- coding: utf-8 -*-
from boris import utils
import datetime
from django.contrib.contenttypes.models import ContentType
from django.template import loader
from django.template.context import RequestContext
from django.utils.translation import ugettext_lazy as _
from boris.classification import DRUG_APPLICATION_TYPES as DAT
from boris.clients.models import Client
from boris.reporting.core import BaseReport
from boris.services.models import Encounter
def enrich_with_type(client):
"""Enrich client with his/her 'type', as understood by this report."""
if client.close_person:
client.type_ = _(u'Osoba blízká')
elif client.primary_drug_usage in (DAT.VEIN_INJECTION, DAT.MUSCLE_INJECTION):
client.type_ = _(u'IV uživatel')
else:
client.type_ = _(u'neIV uživatel')
class ClientReport(BaseReport):
title = u'Shrnutí klientů'
description = (u'Přehled klientů splňujících zadaná kritéria. '
u'Město ve formuláři označuje místo, kde byl zaznamenán kontakt s klientem.')
contenttype_office = 'application/vnd.ms-excel; charset=utf-8'
columns = (_(u'Klientský kód'), _(u'Pohlaví'), _(u'Věk'), _(u'Město'),
_(u'Typ klienta'), _(u'Primární droga'))
def __init__(self, date_from=None, date_to=None, towns=None, towns_residence=None, services=None, age_to=None,
age_from=None):
client_contenttype = ContentType.objects.get_by_natural_key('clients', 'Client')
clients = Client.objects.filter_by_age(age_from, age_to)
if towns_residence is not None:
clients = clients.filter(town__in=towns_residence)
client_ids = clients.values_list('id', flat=True)
filtering = (
('performed_on__gte', date_from),
('performed_on__lte', date_to),
('where__in', towns),
('services__content_type__id__in', services),
('person__content_type', client_contenttype),
('person_id__in', client_ids),
)
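        # Keep only clauses that received a value; the client-id clause also applies whenever an age bound was given.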
filtering = ((f[0], f[1]) for f in filtering if f[1] or
(f[0] == 'person_id__in' and (age_to is not None or age_from is not None)))
self.filtering = dict(filtering)
self.date_from = date_from
self.date_to = date_to
self.towns = towns
self.towns_residence = towns_residence
self.services = services
def get_filename(self):
return 'souhrn_klientu.xls'
def get_stats(self):
person_ids = Encounter.objects.filter(**self.filtering).values_list(
'person', flat=True).select_related('services', 'services__content_type')
# distinct() cannot be used here because
# Encounters are ordered by default.
clients = Client.objects.filter(person_ptr__in=person_ids).order_by(
'code')
for client in clients:
enrich_with_type(client)
return clients
@staticmethod
def get_median_age(client_stats, date_to):
relative_to = date_to or datetime.date.today()
        ages = list(filter(bool, (c.get_relative_age(relative_to) for c in client_stats)))
        if ages:
median_age = utils.median(ages)
return int(median_age)
@staticmethod
def get_average_age(client_stats, date_to):
"""Return average age of the filtered clients."""
relative_to = date_to or datetime.date.today()
        ages = list(filter(bool, (c.get_relative_age(relative_to) for c in client_stats)))
if ages:
return int(round(float(sum(ages)) / len(ages)))
def render(self, request, display_type):
client_stats = self.get_stats()
return loader.render_to_string(
self.get_template(display_type),
{
'report': self,
'stats': client_stats,
'towns': [t.title for t in self.towns],
'towns_residence': self.towns_residence,
'services': self.services,
'date_from': self.date_from,
'date_to': self.date_to,
'average_age': self.get_average_age(client_stats, self.date_to),
'median_age': self.get_median_age(client_stats, self.date_to)
},
context_instance=RequestContext(request)
)
| 40.158879 | 114 | 0.647428 | 3,506 | 0.811011 | 0 | 0 | 638 | 0.147583 | 0 | 0 | 859 | 0.198705 |
ba01fa0a52b489962a736d36092f48f17a3d2800 | 51 | py | Python | 1-Machine-Learning/0-Basic-Knowledge/Math-Knowledge/GMM.py | yzy1996/Artificial-Intelligence | 30a9a2ce1602b9fa9be5981e98885c1c4244cbbd | [
"MIT"
] | 7 | 2019-11-09T02:55:35.000Z | 2021-08-16T12:43:44.000Z | 4-Archived/Knowledge/GMM.py | yzy1996/Machine-Learning | 67c58ee5603fe25d789b3c4b3fa3929c5ac82c1f | [
"MIT"
] | 3 | 2020-10-13T03:12:03.000Z | 2021-03-21T09:03:02.000Z | 4-Archived/Knowledge/GMM.py | yzy1996/Machine-Learning | 67c58ee5603fe25d789b3c4b3fa3929c5ac82c1f | [
"MIT"
] | 6 | 2020-06-07T08:14:15.000Z | 2021-08-02T09:04:31.000Z | '''
Gaussian mixture model (GMM)
Uses a combination of several Gaussian components to fit the data distribution.
'''
import numpy as np
| 7.285714 | 18 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.774194 |
ba02074bfe1f775062ea20b58c4a53ed13f99818 | 23,967 | py | Python | app1/mercado_externo.py | Nicolasvegam/astrazeneca | 9f549c170553d6ad13bc2949e147f4a2a53cb67d | [
"MIT"
] | null | null | null | app1/mercado_externo.py | Nicolasvegam/astrazeneca | 9f549c170553d6ad13bc2949e147f4a2a53cb67d | [
"MIT"
] | null | null | null | app1/mercado_externo.py | Nicolasvegam/astrazeneca | 9f549c170553d6ad13bc2949e147f4a2a53cb67d | [
"MIT"
] | null | null | null | import json
frecuencias = [
{
"tag": "company",
"count": 44
},
{
"tag": "team",
"count": 44
},
{
"tag": "experience",
"count": 43
},
{
"tag": "global",
"count": 42
},
{
"tag": "local",
"count": 41
},
{
"tag": "we",
"count": 40
},
{
"tag": "experiencia",
"count": 39
},
{
"tag": "work",
"count": 39
},
{
"tag": "time",
"count": 38
},
{
"tag": "support",
"count": 37
},
{
"tag": "english",
"count": 36
},
{
"tag": "management",
"count": 36
},
{
"tag": "required",
"count": 36
},
{
"tag": "working",
"count": 35
},
{
"tag": "related",
"count": 34
},
{
"tag": "skills",
"count": 34
},
{
"tag": "years",
"count": 34
},
{
"tag": "you",
"count": 34
},
{
"tag": "business",
"count": 33
},
{
"tag": "including",
"count": 33
},
{
"tag": "medical",
"count": 33
},
{
"tag": "trabajo",
"count": 33
},
{
"tag": "clinical",
"count": 32
},
{
"tag": "degree",
"count": 32
},
{
"tag": "knowledge",
"count": 32
},
{
"tag": "communication",
"count": 31
},
{
"tag": "development",
"count": 31
},
{
"tag": "activities",
"count": 30
},
{
"tag": "description",
"count": 30
},
{
"tag": "health",
"count": 30
},
{
"tag": "strong",
"count": 30
},
{
"tag": "empresa",
"count": 28
},
{
"tag": "process",
"count": 28
},
{
"tag": "qualifications",
"count": 28
},
{
"tag": "requisitos",
"count": 28
},
{
"tag": "written",
"count": 28
},
{
"tag": "área",
"count": 28
},
{
"tag": "control",
"count": 27
},
{
"tag": "equipo",
"count": 27
},
{
"tag": "opportunities",
"count": 27
},
{
"tag": "provide",
"count": 27
},
{
"tag": "requirements",
"count": 27
},
{
"tag": "research",
"count": 27
},
{
"tag": "training",
"count": 27
},
{
"tag": "within",
"count": 27
},
{
"tag": "años",
"count": 26
},
{
"tag": "data",
"count": 26
},
{
"tag": "information",
"count": 26
},
{
"tag": "job description",
"count": 26
},
{
"tag": "office",
"count": 26
},
{
"tag": "ability",
"count": 25
},
{
"tag": "avanzado",
"count": 25
},
{
"tag": "opportunity",
"count": 25
},
{
"tag": "position",
"count": 25
},
{
"tag": "ensure",
"count": 24
},
{
"tag": "responsibilities",
"count": 24
},
{
"tag": "role",
"count": 24
},
{
"tag": "site",
"count": 24
},
{
"tag": "career",
"count": 23
},
{
"tag": "etc",
"count": 23
},
{
"tag": "join",
"count": 23
},
{
"tag": "new",
"count": 23
},
{
"tag": "personal",
"count": 23
},
{
"tag": "projects",
"count": 23
},
{
"tag": "quality",
"count": 23
},
{
"tag": "status",
"count": 23
},
{
"tag": "applicable",
"count": 22
},
{
"tag": "develop",
"count": 22
},
{
"tag": "environment",
"count": 22
},
{
"tag": "internal",
"count": 22
},
{
"tag": "patients",
"count": 22
},
{
"tag": "pharmaceutical",
"count": 22
},
{
"tag": "processes",
"count": 22
},
{
"tag": "productos",
"count": 22
},
{
"tag": "regulatory",
"count": 22
},
{
"tag": "responsible",
"count": 22
},
{
"tag": "employment",
"count": 21
},
{
"tag": "full",
"count": 21
},
{
"tag": "life",
"count": 21
},
{
"tag": "orientation",
"count": 21
},
{
"tag": "performance",
"count": 21
},
{
"tag": "products",
"count": 21
},
{
"tag": "review",
"count": 21
},
{
"tag": "salud",
"count": 21
},
{
"tag": "services",
"count": 21
},
{
"tag": "travel",
"count": 21
},
{
"tag": "across",
"count": 20
},
{
"tag": "based",
"count": 20
},
{
"tag": "compliance",
"count": 20
},
{
"tag": "education",
"count": 20
},
{
"tag": "equal",
"count": 20
},
{
"tag": "marketing",
"count": 20
},
{
"tag": "operations",
"count": 20
},
{
"tag": "organization",
"count": 20
},
{
"tag": "planning",
"count": 20
},
{
"tag": "procedures",
"count": 20
},
{
"tag": "sexual",
"count": 20
},
{
"tag": "us",
"count": 20
},
{
"tag": "apply",
"count": 19
},
{
"tag": "chile",
"count": 19
},
{
"tag": "contact",
"count": 19
},
{
"tag": "healthcare",
"count": 19
},
{
"tag": "include",
"count": 19
},
{
"tag": "industry",
"count": 19
},
{
"tag": "maintain",
"count": 19
},
{
"tag": "manager",
"count": 19
},
{
"tag": "may",
"count": 19
},
{
"tag": "monitoring",
"count": 19
},
{
"tag": "must",
"count": 19
},
{
"tag": "people",
"count": 19
},
{
"tag": "principales",
"count": 19
},
{
"tag": "procesos",
"count": 19
},
{
"tag": "race",
"count": 19
},
{
"tag": "sexual orientation",
"count": 19
},
{
"tag": "si",
"count": 19
},
{
"tag": "solutions",
"count": 19
},
{
"tag": "trabajar",
"count": 19
},
{
"tag": "use",
"count": 19
},
{
"tag": "world",
"count": 19
},
{
"tag": "assigned",
"count": 18
},
{
"tag": "employees",
"count": 18
},
{
"tag": "inglés",
"count": 18
},
{
"tag": "lead",
"count": 18
},
{
"tag": "leadership",
"count": 18
},
{
"tag": "level",
"count": 18
},
{
"tag": "needs",
"count": 18
},
{
"tag": "nivel",
"count": 18
},
{
"tag": "one",
"count": 18
},
{
"tag": "plan",
"count": 18
},
{
"tag": "religion",
"count": 18
},
{
"tag": "sites",
"count": 18
},
{
"tag": "specific",
"count": 18
},
{
"tag": "study",
"count": 18
},
{
"tag": "teams",
"count": 18
},
{
"tag": "therapeutic",
"count": 18
},
{
"tag": "well",
"count": 18
},
{
"tag": "ambiente",
"count": 17
},
{
"tag": "areas",
"count": 17
},
{
"tag": "clinical research",
"count": 17
},
{
"tag": "conocimientos",
"count": 17
},
{
"tag": "day",
"count": 17
},
{
"tag": "excel",
"count": 17
},
{
"tag": "field",
"count": 17
},
{
"tag": "functional",
"count": 17
},
{
"tag": "good",
"count": 17
},
{
"tag": "impact",
"count": 17
},
{
"tag": "lives",
"count": 17
},
{
"tag": "patient",
"count": 17
},
{
"tag": "profesional",
"count": 17
},
{
"tag": "regulations",
"count": 17
},
{
"tag": "responsabilidades",
"count": 17
},
{
"tag": "also",
"count": 16
},
{
"tag": "calidad",
"count": 16
},
{
"tag": "comunicación",
"count": 16
},
{
"tag": "countries",
"count": 16
},
{
"tag": "desarrollo",
"count": 16
},
{
"tag": "disability",
"count": 16
},
{
"tag": "equivalent",
"count": 16
},
{
"tag": "every",
"count": 16
},
{
"tag": "farmacia",
"count": 16
},
{
"tag": "general",
"count": 16
},
{
"tag": "high",
"count": 16
},
{
"tag": "innovative",
"count": 16
},
{
"tag": "language",
"count": 16
},
{
"tag": "leading",
"count": 16
},
{
"tag": "market",
"count": 16
},
{
"tag": "mercado",
"count": 16
},
{
"tag": "part",
"count": 16
},
{
"tag": "product",
"count": 16
},
{
"tag": "project",
"count": 16
},
{
"tag": "relevant",
"count": 16
},
{
"tag": "systems",
"count": 16
},
{
"tag": "accordance",
"count": 15
},
{
"tag": "appropriate",
"count": 15
},
{
"tag": "area",
"count": 15
},
{
"tag": "búsqueda",
"count": 15
},
{
"tag": "care",
"count": 15
},
{
"tag": "cargo",
"count": 15
},
{
"tag": "carrera",
"count": 15
},
{
"tag": "compañía",
"count": 15
},
{
"tag": "computer",
"count": 15
},
{
"tag": "conduct",
"count": 15
},
{
"tag": "conocimiento",
"count": 15
},
{
"tag": "customer",
"count": 15
},
{
"tag": "customers",
"count": 15
},
{
"tag": "documentation",
"count": 15
},
{
"tag": "excellent",
"count": 15
},
{
"tag": "external",
"count": 15
},
{
"tag": "gcp",
"count": 15
},
{
"tag": "gender",
"count": 15
},
{
"tag": "help",
"count": 15
},
{
"tag": "issues",
"count": 15
},
{
"tag": "key",
"count": 15
},
{
"tag": "make",
"count": 15
},
{
"tag": "minimum",
"count": 15
},
{
"tag": "offer",
"count": 15
},
{
"tag": "origin",
"count": 15
},
{
"tag": "protocol",
"count": 15
},
{
"tag": "provides",
"count": 15
},
{
"tag": "realizar",
"count": 15
},
{
"tag": "reports",
"count": 15
},
{
"tag": "sales",
"count": 15
},
{
"tag": "science",
"count": 15
},
{
"tag": "sops",
"count": 15
},
{
"tag": "trial",
"count": 15
},
{
"tag": "trials",
"count": 15
},
{
"tag": "understanding",
"count": 15
},
{
"tag": "vida",
"count": 15
},
{
"tag": "what",
"count": 15
},
{
"tag": "actions",
"count": 14
},
{
"tag": "age",
"count": 14
},
{
"tag": "analysis",
"count": 14
},
{
"tag": "argentina",
"count": 14
},
{
"tag": "años experiencia",
"count": 14
},
{
"tag": "color",
"count": 14
},
{
"tag": "diverse",
"count": 14
},
{
"tag": "effective",
"count": 14
},
{
"tag": "eg",
"count": 14
},
{
"tag": "employer",
"count": 14
},
{
"tag": "equal opportunity",
"count": 14
},
{
"tag": "follow",
"count": 14
},
{
"tag": "habilidades",
"count": 14
},
{
"tag": "importante",
"count": 14
},
{
"tag": "national",
"count": 14
},
{
"tag": "ofrecemos",
"count": 14
},
{
"tag": "preferred",
"count": 14
},
{
"tag": "regional",
"count": 14
},
{
"tag": "tareas",
"count": 14
},
{
"tag": "verbal",
"count": 14
},
{
"tag": "áreas",
"count": 14
},
{
"tag": "able",
"count": 13
},
{
"tag": "actividades",
"count": 13
},
{
"tag": "aires",
"count": 13
},
{
"tag": "análisis",
"count": 13
},
{
"tag": "both",
"count": 13
},
{
"tag": "buenos",
"count": 13
},
{
"tag": "buenos aires",
"count": 13
},
{
"tag": "capacidad",
"count": 13
},
{
"tag": "clientes",
"count": 13
},
{
"tag": "clinical trials",
"count": 13
},
{
"tag": "communication skills",
"count": 13
},
{
"tag": "core",
"count": 13
},
{
"tag": "duties",
"count": 13
},
{
"tag": "essential",
"count": 13
},
{
"tag": "farmacéutica",
"count": 13
},
{
"tag": "flexible",
"count": 13
},
{
"tag": "full time",
"count": 13
},
{
"tag": "gestión",
"count": 13
},
{
"tag": "grow",
"count": 13
},
{
"tag": "growth",
"count": 13
},
{
"tag": "información",
"count": 13
},
{
"tag": "location",
"count": 13
},
{
"tag": "manejo",
"count": 13
},
{
"tag": "medicamentos",
"count": 13
},
{
"tag": "necesidades",
"count": 13
},
{
"tag": "necessary",
"count": 13
},
{
"tag": "participate",
"count": 13
},
{
"tag": "plans",
"count": 13
},
{
"tag": "potential",
"count": 13
},
{
"tag": "proceso",
"count": 13
},
{
"tag": "providing",
"count": 13
},
{
"tag": "relationships",
"count": 13
},
{
"tag": "report",
"count": 13
},
{
"tag": "service",
"count": 13
},
{
"tag": "solving",
"count": 13
},
{
"tag": "specialist",
"count": 13
},
{
"tag": "staff",
"count": 13
},
{
"tag": "supporting",
"count": 13
},
{
"tag": "tasks",
"count": 13
},
{
"tag": "technical",
"count": 13
},
{
"tag": "together",
"count": 13
},
{
"tag": "track",
"count": 13
},
{
"tag": "través",
"count": 13
},
{
"tag": "without",
"count": 13
},
{
"tag": "access",
"count": 12
},
{
"tag": "administración",
"count": 12
},
{
"tag": "advanced",
"count": 12
},
{
"tag": "around",
"count": 12
},
{
"tag": "building",
"count": 12
},
{
"tag": "buscamos",
"count": 12
},
{
"tag": "cross",
"count": 12
},
{
"tag": "documentación",
"count": 12
},
{
"tag": "documents",
"count": 12
},
{
"tag": "ensuring",
"count": 12
},
{
"tag": "excellence",
"count": 12
},
{
"tag": "experienced",
"count": 12
},
{
"tag": "industria",
"count": 12
},
{
"tag": "líder",
"count": 12
},
{
"tag": "manage",
"count": 12
},
{
"tag": "mantener",
"count": 12
},
{
"tag": "marital",
"count": 12
},
{
"tag": "members",
"count": 12
},
{
"tag": "negocio",
"count": 12
},
{
"tag": "participar",
"count": 12
},
{
"tag": "point",
"count": 12
},
{
"tag": "policies",
"count": 12
},
{
"tag": "posición",
"count": 12
},
{
"tag": "profesionales",
"count": 12
},
{
"tag": "reviews",
"count": 12
},
{
"tag": "santiago",
"count": 12
},
{
"tag": "sap",
"count": 12
},
{
"tag": "scientific",
"count": 12
},
{
"tag": "ser",
"count": 12
},
{
"tag": "soporte",
"count": 12
},
{
"tag": "stakeholders",
"count": 12
},
{
"tag": "standard",
"count": 12
},
{
"tag": "standards",
"count": 12
},
{
"tag": "success",
"count": 12
},
{
"tag": "technology",
"count": 12
},
{
"tag": "timely",
"count": 12
},
{
"tag": "veteran",
"count": 12
},
{
"tag": "approval",
"count": 11
},
{
"tag": "best",
"count": 11
},
{
"tag": "build",
"count": 11
},
{
"tag": "cada",
"count": 11
},
{
"tag": "carreras",
"count": 11
},
{
"tag": "cliente",
"count": 11
},
{
"tag": "commercial",
"count": 11
},
{
"tag": "companies",
"count": 11
},
{
"tag": "completion",
"count": 11
},
{
"tag": "completo",
"count": 11
},
{
"tag": "condition",
"count": 11
},
{
"tag": "contacto",
"count": 11
},
{
"tag": "contract",
"count": 11
},
{
"tag": "coordinar",
"count": 11
},
{
"tag": "coordination",
"count": 11
},
{
"tag": "current",
"count": 11
},
{
"tag": "dentro",
"count": 11
},
{
"tag": "develops",
"count": 11
},
{
"tag": "diferentes",
"count": 11
},
{
"tag": "diversity",
"count": 11
},
{
"tag": "effectively",
"count": 11
},
{
"tag": "employee",
"count": 11
},
{
"tag": "equipos",
"count": 11
},
{
"tag": "farmacéutico",
"count": 11
},
{
"tag": "fluent",
"count": 11
},
{
"tag": "gender identity",
"count": 11
},
{
"tag": "guidelines",
"count": 11
},
{
"tag": "identity",
"count": 11
},
{
"tag": "implementation",
"count": 11
},
{
"tag": "important",
"count": 11
},
{
"tag": "improve",
"count": 11
},
{
"tag": "improvement",
"count": 11
},
{
"tag": "individuals",
"count": 11
},
{
"tag": "innovation",
"count": 11
},
{
"tag": "interpersonal",
"count": 11
},
{
"tag": "laboratorio",
"count": 11
},
{
"tag": "marital status",
"count": 11
},
{
"tag": "medicines",
"count": 11
},
{
"tag": "menos",
"count": 11
},
{
"tag": "next",
"count": 11
},
{
"tag": "oportunidades",
"count": 11
},
{
"tag": "oral",
"count": 11
},
{
"tag": "organización",
"count": 11
},
{
"tag": "please",
"count": 11
},
{
"tag": "portfolio",
"count": 11
},
{
"tag": "practices",
"count": 11
},
{
"tag": "problem",
"count": 11
},
{
"tag": "problem solving",
"count": 11
},
{
"tag": "software",
"count": 11
},
{
"tag": "strategy",
"count": 11
},
{
"tag": "team members",
"count": 11
},
{
"tag": "todas",
"count": 11
},
{
"tag": "trabajo equipo",
"count": 11
},
{
"tag": "written communication",
"count": 11
},
{
"tag": "action",
"count": 10
},
{
"tag": "afines",
"count": 10
},
{
"tag": "application",
"count": 10
},
{
"tag": "available",
"count": 10
},
{
"tag": "bachelor",
"count": 10
},
{
"tag": "case",
"count": 10
},
{
"tag": "client",
"count": 10
},
{
"tag": "collaboration",
"count": 10
},
{
"tag": "colleagues",
"count": 10
},
{
"tag": "color religion",
"count": 10
},
{
"tag": "comercial",
"count": 10
},
{
"tag": "company description",
"count": 10
},
{
"tag": "complex",
"count": 10
},
{
"tag": "critical",
"count": 10
},
{
"tag": "cro",
"count": 10
},
{
"tag": "cross functional",
"count": 10
},
{
"tag": "equal employment",
"count": 10
},
{
"tag": "excluyente",
"count": 10
},
{
"tag": "execution",
"count": 10
},
{
"tag": "expertise",
"count": 10
},
{
"tag": "funciones",
"count": 10
},
{
"tag": "goals",
"count": 10
},
{
"tag": "ich",
"count": 10
},
{
"tag": "laboratorios",
"count": 10
},
{
"tag": "liderar",
"count": 10
},
{
"tag": "maintaining",
"count": 10
},
{
"tag": "mantenimiento",
"count": 10
},
{
"tag": "meet",
"count": 10
},
{
"tag": "meetings",
"count": 10
},
{
"tag": "microsoft",
"count": 10
},
{
"tag": "mission",
"count": 10
},
{
"tag": "multiple",
"count": 10
},
{
"tag": "mundo",
"count": 10
},
{
"tag": "médicos",
"count": 10
},
{
"tag": "need",
"count": 10
},
{
"tag": "needed",
"count": 10
},
{
"tag": "objectives",
"count": 10
},
{
"tag": "ongoing",
"count": 10
},
{
"tag": "open",
"count": 10
},
{
"tag": "operating",
"count": 10
},
{
"tag": "operating procedures",
"count": 10
},
{
"tag": "organizational",
"count": 10
},
{
"tag": "orientación",
"count": 10
},
{
"tag": "pacientes",
"count": 10
},
{
"tag": "perfil",
"count": 10
},
{
"tag": "persona",
"count": 10
},
{
"tag": "personas",
"count": 10
},
{
"tag": "policy",
"count": 10
},
{
"tag": "positions",
"count": 10
},
{
"tag": "primary",
"count": 10
},
{
"tag": "prior",
"count": 10
},
{
"tag": "professional",
"count": 10
},
{
"tag": "recruitment",
"count": 10
},
{
"tag": "regarding",
"count": 10
},
{
"tag": "regular",
"count": 10
},
{
"tag": "relaciones",
"count": 10
},
{
"tag": "resolution",
"count": 10
},
{
"tag": "resources",
"count": 10
},
{
"tag": "sciences",
"count": 10
},
{
"tag": "seguimiento",
"count": 10
},
{
"tag": "servicios",
"count": 10
},
{
"tag": "sex",
"count": 10
},
{
"tag": "strategic",
"count": 10
},
{
"tag": "successful",
"count": 10
},
{
"tag": "superior",
"count": 10
},
{
"tag": "tiempo",
"count": 10
},
{
"tag": "tools",
"count": 10
},
{
"tag": "tracking",
"count": 10
},
{
"tag": "university",
"count": 10
},
{
"tag": "visit",
"count": 10
},
{
"tag": "written communication skills",
"count": 10
},
{
"tag": "according",
"count": 9
},
{
"tag": "accuracy",
"count": 9
},
{
"tag": "administration",
"count": 9
},
{
"tag": "agency",
"count": 9
},
{
"tag": "assist",
"count": 9
},
{
"tag": "bachelor degree",
"count": 9
},
{
"tag": "bajo",
"count": 9
},
{
"tag": "basic",
"count": 9
},
{
"tag": "basis",
"count": 9
},
{
"tag": "bring",
"count": 9
},
{
"tag": "candidates",
"count": 9
},
{
"tag": "clinical trial",
"count": 9
},
{
"tag": "collection",
"count": 9
},
{
"tag": "come",
"count": 9
},
{
"tag": "country",
"count": 9
},
{
"tag": "cra",
"count": 9
},
{
"tag": "cultura",
"count": 9
},
{
"tag": "da",
"count": 9
},
{
"tag": "dedicated",
"count": 9
},
{
"tag": "delivery",
"count": 9
},
{
"tag": "demonstrated",
"count": 9
},
{
"tag": "department",
"count": 9
},
{
"tag": "diseases",
"count": 9
},
{
"tag": "disponibilidad",
"count": 9
},
{
"tag": "employment opportunity",
"count": 9
},
{
"tag": "empresas",
"count": 9
},
{
"tag": "ensures",
"count": 9
},
{
"tag": "equal employment opportunity",
"count": 9
},
{
"tag": "estrategias",
"count": 9
},
{
"tag": "every day",
"count": 9
},
{
"tag": "following",
"count": 9
},
{
"tag": "health care",
"count": 9
},
{
"tag": "herramientas",
"count": 9
},
{
"tag": "international",
"count": 9
},
{
"tag": "internos",
"count": 9
},
{
"tag": "investigator",
"count": 9
},
{
"tag": "laboral",
"count": 9
},
{
"tag": "leader",
"count": 9
},
{
"tag": "least",
"count": 9
},
{
"tag": "maintains",
"count": 9
},
{
"tag": "master",
"count": 9
},
{
"tag": "ms",
"count": 9
},
{
"tag": "oncology",
"count": 9
},
{
"tag": "oportunidad",
"count": 9
},
{
"tag": "order",
"count": 9
},
{
"tag": "others",
"count": 9
},
{
"tag": "parte",
"count": 9
},
{
"tag": "partner",
"count": 9
},
{
"tag": "prepare",
"count": 9
},
{
"tag": "presentation",
"count": 9
},
{
"tag": "protected",
"count": 9
},
{
"tag": "race color",
"count": 9
},
{
"tag": "range",
"count": 9
},
{
"tag": "related field",
"count": 9
},
{
"tag": "schedule",
"count": 9
},
{
"tag": "senior",
"count": 9
},
{
"tag": "set",
"count": 9
},
{
"tag": "spanish",
"count": 9
},
{
"tag": "sponsor",
"count": 9
},
{
"tag": "standard operating",
"count": 9
},
{
"tag": "standard operating procedures",
"count": 9
}
]
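# ensure_ascii=False keeps the accented Spanish tags (e.g. "área") readable in the JSON payload.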
data_mercado = json.dumps(frecuencias, ensure_ascii=False)
| 12.019559 | 58 | 0.375475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,034 | 0.459961 |
ba03c7612f86df91d7ee18658bef4eedbec16d11 | 49 | py | Python | servers/__init__.py | shubhamdipt/monitoring-dashboard | ae4524f1c179cf735fdc17673865bdbdb463ee3e | [
"MIT"
] | 1 | 2021-08-08T14:40:09.000Z | 2021-08-08T14:40:09.000Z | servers/__init__.py | shubhamdipt/monitoring-dashboard | ae4524f1c179cf735fdc17673865bdbdb463ee3e | [
"MIT"
] | null | null | null | servers/__init__.py | shubhamdipt/monitoring-dashboard | ae4524f1c179cf735fdc17673865bdbdb463ee3e | [
"MIT"
] | null | null | null | default_app_config = 'servers.apps.ServersConfig' | 49 | 49 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.571429 |
ba041d9f4395da9615d70dc1c9b697b6dd747f7f | 1,903 | py | Python | config.py | SoCaTel/backend-storage | 11667d1868dc0d031bf0c6e938724bf2ba6f9343 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | config.py | SoCaTel/backend-storage | 11667d1868dc0d031bf0c6e938724bf2ba6f9343 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | config.py | SoCaTel/backend-storage | 11667d1868dc0d031bf0c6e938724bf2ba6f9343 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #--------------------------------------------------------------------------------
# SoCaTel - Backend data storage API endpoints docker container
# These tokens are needed for database access.
#--------------------------------------------------------------------------------
#===============================================================================
# SoCaTel Knowledge Base Deployment
#===============================================================================
# The following are the pre-defined names of elasticsearch indices for the SoCaTel project which
# host data shared with the front-end.
elastic_user_index = "so_user"
elastic_group_index = "so_group"
elastic_organisation_index = "so_organisation"
elastic_service_index = "so_service"
elastic_host = "<insert_elastic_host>" # e.g. "127.0.0.1"
elastic_port = "9200" # Default Elasticsearch port, change accordingly
elastic_user = "<insert_elasticsearch_username>"
elastic_passwd = "<insert_elasticsearch_password>"
#===============================================================================
#===============================================================================
# Linked Pipes ETL Configuration
#===============================================================================
# The following correspond to the URLs corresponding to the linked pipes executions,
# which need to be setup beforehand. They are set to localhost, change according to deployment details
path = "http://127.0.0.1:32800/resources/executions"
user_pipeline = "http://127.0.0.1:32800/resources/pipelines/1578586195746"
group_pipeline = "http://127.0.0.1:32800/resources/pipelines/1578586045942"
organisation_pipeline = "http://127.0.0.1:32800/resources/pipelines/1575531753483"
service_pipeline = "http://127.0.0.1:32800/resources/pipelines/1565080262463"
#===============================================================================
| 55.970588 | 102 | 0.524435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,625 | 0.853915 |
ba042e7a2bc7c5d67d985eb95d5d7984724f276f | 5,055 | py | Python | vet_care/vet_care/report/vc_sales_transaction/vc_sales_transaction.py | neerajvkn/vet_care | 14914b22e7a83265d736f9f9dc5186271ae62d66 | [
"MIT"
] | 2 | 2020-11-23T11:14:32.000Z | 2021-02-03T06:40:33.000Z | vet_care/vet_care/report/vc_sales_transaction/vc_sales_transaction.py | neerajvkn/vet_care | 14914b22e7a83265d736f9f9dc5186271ae62d66 | [
"MIT"
] | null | null | null | vet_care/vet_care/report/vc_sales_transaction/vc_sales_transaction.py | neerajvkn/vet_care | 14914b22e7a83265d736f9f9dc5186271ae62d66 | [
"MIT"
] | 7 | 2019-11-16T14:36:33.000Z | 2021-08-25T07:54:51.000Z | # Copyright (c) 2013, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import fmt_money
from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial
def execute(filters=None):
columns, data = _get_columns(filters), _get_data(filters)
return columns, _append_summary(data)
def _get_columns(filters):
def make_column(label, fieldname, width, fieldtype='Data', options='', hidden=False):
return {
'label': _(label),
'fieldname': fieldname,
'fieldtype': fieldtype,
'width': width,
'options': options
}
return [
make_column('Invoice No', 'invoice_no', 130, 'Link', 'Sales Invoice'),
make_column('Invoice Date', 'invoice_date', 130, 'Date'),
make_column('Item', 'item', 130, 'Link', 'Item'),
make_column('Item Group', 'item_group', 130, 'Link', 'Item Group'),
make_column('Description', 'description', 130),
make_column('Total VAT', 'total_vat', 130, 'Currency'),
make_column('Cost Center', 'cost_center', 130, 'Link', 'Cost Center'),
make_column('Sales Person', 'sales_person_name', 130, 'Data'),
make_column('Customer', 'customer', 130, 'Link', 'Customer'),
make_column('Customer Name', 'customer_name', 130, 'Data'),
make_column('Patient', 'patient', 130, 'Link', 'Patient'),
make_column('Patient Name', 'patient_name', 130, 'Data'),
make_column('Species', 'species', 130, 'Data')
]
def _get_clauses(filters):
clauses = list(filter(lambda x: x, [
'si.docstatus = 1',
'si.posting_date BETWEEN %(from_date)s AND %(to_date)s',
'sii.cost_center = %(cost_center)s' if filters.get('cost_center') else None,
'i.item_group = %(item_group)s' if filters.get('item_group') else None
]))
return 'WHERE {}'.format(' AND '.join(clauses))
def _get_sales_person_fields():
enable_pb = frappe.db.get_single_value('Vetcare Settings', 'enable_pb')
if enable_pb:
fields = [
'si.pb_sales_employee as sales_person',
'si.pb_sales_employee_name as sales_person_name'
]
else:
fields = [
'si.pb_sales_person as sales_person',
'si.pb_sales_person_name as sales_person_name'
]
return ', '.join(fields)
def _get_data(filters):
def make_data(row):
rate = _get_rate(row.get('taxes_and_charges'), cached_taxes_and_charges) / 100.00
row['total_vat'] = row.get('amount') * rate
row['species'] = species.get(row.get('patient'))
return row
data = frappe.db.sql("""
SELECT
si.name as invoice_no,
si.posting_date as invoice_date,
sii.item_code as item,
i.item_group,
sii.description,
sii.amount,
si.taxes_and_charges,
sii.cost_center,
si.customer,
si.customer_name,
si.patient,
si.patient_name,
{sales_person_fields}
FROM `tabSales Invoice Item` sii
INNER JOIN `tabSales Invoice` si ON si.name = sii.parent
INNER JOIN `tabItem` i ON i.name = sii.item_code
{clauses}
""".format(
clauses=_get_clauses(filters),
sales_person_fields=_get_sales_person_fields()
),
filters,
as_dict=1
)
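    # make_data() closes over the two names below, so they must be bound before map() runs.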
cached_taxes_and_charges = {}
species = _get_species(list(set(map(lambda x: x['patient'], data))))
return list(map(make_data, data))
def _append_summary(data):
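    # Append per-sales-person footer rows (total VAT, client count, animal counts) after the detail rows.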
def make_data(val):
clients = compose(
count,
unique,
pluck('customer'),
lambda: val
)
animals = compose(
valmap(count),
groupby('species'),
lambda: val
)
return {
'total_val': reduce(lambda total, x: total + x.get('total_vat'), val, 0.00),
'animals': _get_dict_to_csv(animals()),
'clients': clients()
}
sales_persons = compose(
valmap(make_data),
groupby('sales_person_name'),
lambda: data
)()
data.append({'invoice_no': "'-'"}) # for report html (break loop)
for k, v in sales_persons.items():
sales_person = k or 'Not specified'
data.append({'invoice_no': "'Sales Person'", 'item': f"'{sales_person}'"})
data.append({'invoice_no': "'Total Amt'", 'item': f"'{fmt_money(v.get('total_val'))}'"})
data.append({'invoice_no': "'Clients'", 'item': f"'{v.get('clients')}'"})
data.append({'invoice_no': "'Animals'", 'item': f"'{v.get('animals')}'"})
data.append({})
return data
def _get_rate(template, cache=None):
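    # Memoize the rate per tax template so repeated invoice rows avoid extra lookups.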
if cache and template in cache:
return cache[template]
if template is None:
return 0.00
taxes_and_charges = frappe.get_all(
'Sales Taxes and Charges',
filters={'parent': template},
fields=['rate']
)
rate = 0.0
if taxes_and_charges:
rate = taxes_and_charges[0].get('rate')
if cache is not None:
cache[template] = rate
return rate
def _get_species(patients):
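    # Build a patient-name -> species mapping with a single batched query.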
species = compose(
valmap(lambda x: x['vc_species']),
valmap(first),
groupby('name'),
lambda: frappe.get_all(
'Patient',
filters=[['name', 'in', patients]],
fields=['name', 'vc_species']
)
)
return species()
def _get_dict_to_csv(data, sep=', ', columns=None):
csv = []
for k, v in data.items():
column_name = k or "Others"
if columns and k in columns:
column_name = columns[k]
csv.append(f'{column_name}={v}')
return sep.join(csv)
| 27.324324 | 96 | 0.682295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,995 | 0.394659 |
ba04925fb21e3e8cf52311a9c8f795fc603cbc36 | 1,193 | py | Python | integration-test/1634-hide-early-nursing-home.py | rinnyB/vector-datasource | 024909ed8245a4ad4a25c908413ba3602de6c335 | [
"MIT"
] | null | null | null | integration-test/1634-hide-early-nursing-home.py | rinnyB/vector-datasource | 024909ed8245a4ad4a25c908413ba3602de6c335 | [
"MIT"
] | 2 | 2021-03-31T20:22:37.000Z | 2021-12-13T20:50:11.000Z | integration-test/1634-hide-early-nursing-home.py | rinnyB/vector-datasource | 024909ed8245a4ad4a25c908413ba3602de6c335 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from . import FixtureTest
class HideEarlyNursingHomeTest(FixtureTest):
def test_nursing_home_area(self):
import dsl
z, x, y = (15, 5237, 12667)
self.generate_fixtures(
# https://www.openstreetmap.org/way/267296981
dsl.way(267296981, dsl.tile_box(z, x, y), {
'addr:city': u'San Francisco',
'addr:country': u'US',
'addr:housenumber': u'1575',
'addr:postcode': u'94122',
'addr:state': u'CA',
'addr:street': u'7th Avenue',
'amenity': u'nursing_home',
'building': u'yes',
'height': u'5',
'name': (u'Kindred Transitional Care and Rehabilitation - '
u'Lawton'),
'phone': u'+1 (415) 566-1200',
'source': u'openstreetmap.org',
'website': u'http://www.lawtonhealthcare.com',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 267296981,
'kind': u'nursing_home',
'min_zoom': 15,
})
| 31.394737 | 75 | 0.460184 | 1,138 | 0.953898 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.388097 |
ba05091c49ea6a651c08af4ab43f6b89d1619ef6 | 1,567 | py | Python | mido_midifile.py | olemb/rawmidifile | d2f5df882257b14c924a90110d506729080e8c1e | [
"MIT"
] | null | null | null | mido_midifile.py | olemb/rawmidifile | d2f5df882257b14c924a90110d506729080e8c1e | [
"MIT"
] | null | null | null | mido_midifile.py | olemb/rawmidifile | d2f5df882257b14c924a90110d506729080e8c1e | [
"MIT"
] | null | null | null | """
MIDI file reader and writer for Mido built on top of rawmidifile.
There is no official API in Mido for encoding and decoding meta
messages so I've had to use some internal functions.
"""
from rawmidifile import read_rawmidifile, write_rawmidifile
import mido
from mido.midifiles.meta import (build_meta_message,
_META_SPEC_BY_TYPE, UnknownMetaMessage)
def decode_msg(msg, delta=0):
if msg[0] == 0xff and len(msg) > 1:
# Meta message.
return build_meta_message(msg[1], msg[2:], delta)
else:
return mido.Message.from_bytes(msg, delta)
def decode_track(track):
return [decode_msg(msg, delta) for (delta, msg) in track]
def encode_msg(msg):
    if isinstance(msg, UnknownMetaMessage):
        # Check this first: UnknownMetaMessage is also a meta message, and its
        # raw payload lives in ``data`` rather than in a known meta spec.
        msg_bytes = bytes([0xff, msg.type_byte]) + bytes(msg.data)
    elif msg.is_meta:
        spec = _META_SPEC_BY_TYPE[msg.type]
        data = spec.encode(msg)
        msg_bytes = bytes([0xff, spec.type_byte]) + bytes(data)
    else:
        msg_bytes = bytes(msg.bytes())
    return (msg.time, msg_bytes)
def encode_track(track):
    # encode_msg() returns (delta, msg_bytes) tuples, mirroring decode_track()'s input.
    return [encode_msg(msg) for msg in track]
def read_midifile(infile):
mid = read_rawmidifile(infile)
mid = mid.copy()
mid['tracks'] = [decode_track(track) for track in mid['tracks']]
return mid
def write_midifile(infile, tracks=(), format=1, resolution=240):
mid = {
'format': format,
'resolution': resolution,
'tracks': [encode_track(track) for track in tracks],
}
write_rawmidifile(infile, **mid)
| 26.559322 | 72 | 0.662412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.159541 |
ba050f61eda5b2d821dc6b5f12862b36a4acfe44 | 8,322 | py | Python | src/Tools/CodeGenerator/Plugins/SharedLibraryTestsPluginImpl/ScalarTypeInfos.py | Bhaskers-Blu-Org2/FeaturizersLibrary | 229ae38ea233bfb02a6ff92ec3a67c1751c58005 | [
"MIT"
] | 15 | 2019-12-14T07:54:18.000Z | 2021-03-14T14:53:28.000Z | src/Tools/CodeGenerator/Plugins/SharedLibraryTestsPluginImpl/ScalarTypeInfos.py | Bhaskers-Blu-Org2/FeaturizersLibrary | 229ae38ea233bfb02a6ff92ec3a67c1751c58005 | [
"MIT"
] | 30 | 2019-12-03T20:58:56.000Z | 2020-04-21T23:34:39.000Z | src/Tools/CodeGenerator/Plugins/SharedLibraryTestsPluginImpl/ScalarTypeInfos.py | microsoft/FeaturizersLibrary | 229ae38ea233bfb02a6ff92ec3a67c1751c58005 | [
"MIT"
] | 13 | 2020-01-23T00:18:47.000Z | 2021-10-04T17:46:45.000Z | # ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the scalar type info objects"""
import os
import textwrap
import CommonEnvironment
from CommonEnvironment import Interface
from Plugins.SharedLibraryTestsPluginImpl.TypeInfo import TypeInfo
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class _ScalarTypeInfo(TypeInfo):
"""Functionality common to all scalars"""
# ----------------------------------------------------------------------
# |
# | Public Properties
# |
# ----------------------------------------------------------------------
@Interface.abstractproperty
def CType(self):
"""C type"""
raise Exception("Abstract property")
# ----------------------------------------------------------------------
# |
# | Public Methods
# |
# ----------------------------------------------------------------------
def __init__(
self,
*args,
member_type=None,
**kwargs
):
if member_type is None:
return
super(_ScalarTypeInfo, self).__init__(*args, **kwargs)
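        # Floating-point types presumably encode null as NaN elsewhere in the
        # library, so only the remaining optional types need the explicit wrapper.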
self.RequiresOptionalType = self.IsOptional and self.TypeName not in ["float", "double"]
# ----------------------------------------------------------------------
@Interface.override
def GetTransformInputArgs(
self,
input_name="input",
):
if self.RequiresOptionalType:
return "Microsoft::Featurizer::Traits<typename Microsoft::Featurizer::Traits<{cpp_type}>::nullable_type>::IsNull({input_name}) ? nullptr : &Microsoft::Featurizer::Traits<typename Microsoft::Featurizer::Traits<{cpp_type}>::nullable_type>::GetNullableValue({input_name})".format(
cpp_type=self.CppType,
input_name=input_name,
)
return input_name
# ----------------------------------------------------------------------
@Interface.override
def GetTransformInputBufferArgs(
self,
input_name='input',
):
if self.RequiresOptionalType:
raise NotImplementedError("Not implemented yet")
return "{name}.data(), {name}.size()".format(
name=input_name,
)
# ----------------------------------------------------------------------
@Interface.override
def GetOutputInfo(
self,
invocation_template,
result_name="result",
):
result_name = "{}_value".format(result_name)
if self.RequiresOptionalType:
vector_type = "nonstd::optional<{}>".format(self.CppType)
local_type = "{} *".format(self.CppType)
statement = "{name} ? std::move(*{name}) : nonstd::optional<{type}>()".format(
type=self.CppType,
name=result_name,
)
else:
vector_type = self.CppType
local_type = self.CppType
if self.TypeName == "bool":
# vector<bool> doesn't support `emplace_back` on older compilers
statement = result_name
else:
statement = "std::move({})".format(result_name)
return self.Result(
vector_type,
[self.Type(local_type, result_name)],
invocation_template.format(statement),
)
# ----------------------------------------------------------------------
@Interface.staticderived
class Int8TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("int8")
CppType = Interface.DerivedProperty("std::int8_t")
CType = Interface.DerivedProperty("int8_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int16TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("int16")
CppType = Interface.DerivedProperty("std::int16_t")
CType = Interface.DerivedProperty("int16_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int32TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("int32")
CppType = Interface.DerivedProperty("std::int32_t")
CType = Interface.DerivedProperty("int32_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int64TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("int64")
CppType = Interface.DerivedProperty("std::int64_t")
CType = Interface.DerivedProperty("int64_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt8TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("uint8")
CppType = Interface.DerivedProperty("std::uint8_t")
CType = Interface.DerivedProperty("uint8_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt16TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("uint16")
CppType = Interface.DerivedProperty("std::uint16_t")
CType = Interface.DerivedProperty("uint16_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt32TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("uint32")
CppType = Interface.DerivedProperty("std::uint32_t")
CType = Interface.DerivedProperty("uint32_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt64TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("uint64")
CppType = Interface.DerivedProperty("std::uint64_t")
CType = Interface.DerivedProperty("uint64_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class FloatTypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("float")
CppType = Interface.DerivedProperty("std::float_t")
CType = Interface.DerivedProperty("float")
# ----------------------------------------------------------------------
@Interface.staticderived
class DoubleTypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("double")
CppType = Interface.DerivedProperty("std::double_t")
CType = Interface.DerivedProperty("double")
# ----------------------------------------------------------------------
@Interface.staticderived
class BoolTypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("bool")
CppType = Interface.DerivedProperty("bool")
CType = Interface.DerivedProperty("bool")
| 41.819095 | 290 | 0.43403 | 6,311 | 0.758351 | 0 | 0 | 5,517 | 0.662942 | 0 | 0 | 2,802 | 0.336698 |
ba0556544146fb79fec4279f3882b808667993f8 | 4,746 | py | Python | testing/solid_growthremodeling_fiberstretch.py | marchirschvogel/amb | af48b2a672cfcfb7a081020cda599fde85aa6b65 | [
"BSD-4-Clause"
] | null | null | null | testing/solid_growthremodeling_fiberstretch.py | marchirschvogel/amb | af48b2a672cfcfb7a081020cda599fde85aa6b65 | [
"BSD-4-Clause"
] | null | null | null | testing/solid_growthremodeling_fiberstretch.py | marchirschvogel/amb | af48b2a672cfcfb7a081020cda599fde85aa6b65 | [
"BSD-4-Clause"
] | null | null | null | #!/usr/bin/env python3
### block that grows in fiber direction triggered by fiber stretch and remodels to softer material
# TODO: Somehow, this does not converge quadratically at the end (seems irrespective of remodeling,
# but likely to be attributed to the growth in fiber direction) ---> check linearization terms!
# only one hex element in this testcase - cannot be run on multiple cores!
import ambit
import sys, traceback
import numpy as np
from pathlib import Path
import results_check
def main():
basepath = str(Path(__file__).parent.absolute())
IO_PARAMS = {'problem_type' : 'solid',
'mesh_domain' : ''+basepath+'/input/blockhex_domain.xdmf',
'mesh_boundary' : ''+basepath+'/input/blockhex_boundary.xdmf',
'fiber_data' : {'nodal' : [''+basepath+'/input/fib1_blockhex.txt',''+basepath+'/input/fib2_blockhex.txt']},
'write_results_every' : -999,
'output_path' : ''+basepath+'/tmp/',
'results_to_write' : ['displacement','theta','fiberstretch','fiberstretch_e','phi_remod'],
'simname' : 'solid_growthremodeling_fiberstretch'}
SOLVER_PARAMS_SOLID = {'solve_type' : 'direct',
'tol_res' : 1.0e-8,
'tol_inc' : 1.0e-8}
TIME_PARAMS_SOLID = {'maxtime' : 1.0,
'numstep' : 20,
'timint' : 'static'}
FEM_PARAMS = {'order_disp' : 1,
'order_pres' : 1,
'quad_degree' : 3,
'incompressible_2field' : False}
MATERIALS = {'MAT1' : {'neohooke_dev' : {'mu' : 10.},
'ogden_vol' : {'kappa' : 10./(1.-2.*0.49)},
'growth' : {'growth_dir' : 'isotropic', # isotropic, fiber, crossfiber, radial
'growth_trig' : 'fibstretch', # fibstretch, volstress, prescribed
'growth_thres' : 1.15,
'thetamax' : 3.0,
'thetamin' : 1.0,
'tau_gr' : 1.0,
'gamma_gr' : 1.72,
'tau_gr_rev' : 10000.0,
'gamma_gr_rev' : 1.0,
'remodeling_mat' : {'neohooke_dev' : {'mu' : 3.},
'ogden_vol' : {'kappa' : 3./(1.-2.*0.49)}}}}}
# define your load curves here (syntax: tcX refers to curve X, to be used in BC_DICT key 'curve' : [X,0,0], or 'curve' : X)
class time_curves():
def tc1(self, t):
pmax = 10.0
return pmax*t/TIME_PARAMS_SOLID['maxtime']
BC_DICT = { 'dirichlet' : [{'id' : [1], 'dir' : 'x', 'val' : 0.},
{'id' : [2], 'dir' : 'y', 'val' : 0.},
{'id' : [3], 'dir' : 'z', 'val' : 0.}],
'neumann' : [{'type' : 'pk1', 'id' : [4], 'dir' : 'xyz', 'curve' : [1,0,0]}] }
# problem setup
problem = ambit.Ambit(IO_PARAMS, TIME_PARAMS_SOLID, SOLVER_PARAMS_SOLID, FEM_PARAMS, MATERIALS, BC_DICT, time_curves=time_curves())
# solve time-dependent problem
problem.solve_problem()
# --- results check
tol = 1.0e-6
check_node = []
check_node.append(np.array([1.0, 1.0, 1.0]))
u_corr = np.zeros(3*len(check_node))
## correct results
u_corr[0] = 1.0812823521095760E+00 # x
u_corr[1] = -1.4360291810029382E-01 # y
u_corr[2] = -1.4360291810029457E-01 # z
check1 = results_check.results_check_node(problem.mp.u, check_node, u_corr, problem.mp.V_u, problem.mp.comm, tol=tol, nm='u')
success = results_check.success_check([check1], problem.mp.comm)
return success
if __name__ == "__main__":
success = False
try:
success = main()
except:
print(traceback.format_exc())
if success:
sys.exit(0)
else:
sys.exit(1)
| 42 | 146 | 0.439528 | 134 | 0.028234 | 0 | 0 | 0 | 0 | 0 | 0 | 1,517 | 0.319638 |
ba07d0c821ac43a4b143f06257b21b47e9c896a7 | 4,061 | py | Python | src/dynamodb_encryption_sdk/materials/__init__.py | ajw-aws/aws-dynamodb-encryption-python | 4374ad5d87c7ea66c249c5f458cac6ff4465d437 | [
"Apache-2.0"
] | null | null | null | src/dynamodb_encryption_sdk/materials/__init__.py | ajw-aws/aws-dynamodb-encryption-python | 4374ad5d87c7ea66c249c5f458cac6ff4465d437 | [
"Apache-2.0"
] | 1 | 2021-03-20T05:42:35.000Z | 2021-03-20T05:42:35.000Z | src/dynamodb_encryption_sdk/materials/__init__.py | gwsu2008/aws-dynamodb-encryption-python | 80465a4b62b1198d519b0ba96da1643eb8af2c9a | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Cryptographic materials are containers that provide delegated keys for cryptographic operations."""
import abc
import six
from dynamodb_encryption_sdk.delegated_keys import DelegatedKey # noqa pylint: disable=unused-import
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import Dict, Text # noqa pylint: disable=unused-import
from mypy_extensions import NoReturn # noqa pylint: disable=unused-import
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
__all__ = ("CryptographicMaterials", "EncryptionMaterials", "DecryptionMaterials")
@six.add_metaclass(abc.ABCMeta)
class CryptographicMaterials(object):
"""Base class for all cryptographic materials."""
@abc.abstractproperty
def material_description(self):
# type: () -> Dict[Text, Text]
"""Material description to use with these cryptographic materials.
:returns: Material description
:rtype: dict
"""
@abc.abstractproperty
def encryption_key(self):
# type: () -> DelegatedKey
"""Delegated key used for encrypting attributes.
:returns: Encryption key
:rtype: DelegatedKey
"""
@abc.abstractproperty
def decryption_key(self):
# type: () -> DelegatedKey
"""Delegated key used for decrypting attributes.
:returns: Decryption key
:rtype: DelegatedKey
"""
@abc.abstractproperty
def signing_key(self):
# type: () -> DelegatedKey
"""Delegated key used for calculating digital signatures.
:returns: Signing key
:rtype: DelegatedKey
"""
@abc.abstractproperty
def verification_key(self):
# type: () -> DelegatedKey
"""Delegated key used for verifying digital signatures.
:returns: Verification key
:rtype: DelegatedKey
"""
class EncryptionMaterials(CryptographicMaterials):
"""Base class for all encryption materials."""
@property
def decryption_key(self):
# type: () -> NoReturn
"""Encryption materials do not provide decryption keys.
:raises NotImplementedError: because encryption materials do not contain decryption keys
"""
raise NotImplementedError("Encryption materials do not provide decryption keys.")
@property
def verification_key(self):
# type: () -> NoReturn
"""Encryption materials do not provide verification keys.
:raises NotImplementedError: because encryption materials do not contain verification keys
"""
raise NotImplementedError("Encryption materials do not provide verification keys.")
class DecryptionMaterials(CryptographicMaterials):
"""Base class for all decryption materials."""
@property
def encryption_key(self):
# type: () -> NoReturn
"""Decryption materials do not provide encryption keys.
:raises NotImplementedError: because decryption materials do not contain encryption keys
"""
raise NotImplementedError("Decryption materials do not provide encryption keys.")
@property
def signing_key(self):
# type: () -> NoReturn
"""Decryption materials do not provide signing keys.
:raises NotImplementedError: because decryption materials do not contain signing keys
"""
raise NotImplementedError("Decryption materials do not provide signing keys.")
| 33.01626 | 102 | 0.690717 | 2,809 | 0.691702 | 0 | 0 | 2,615 | 0.64393 | 0 | 0 | 2,848 | 0.701305 |
ba098b80df6e4bfc3afb72bb393d9f053635499b | 7,441 | py | Python | src/ramstk/views/gtk3/preferences/view.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 26 | 2019-05-15T02:03:47.000Z | 2022-02-21T07:28:11.000Z | src/ramstk/views/gtk3/preferences/view.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 815 | 2019-05-10T12:31:52.000Z | 2022-03-31T12:56:26.000Z | src/ramstk/views/gtk3/preferences/view.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 9 | 2019-04-20T23:06:29.000Z | 2022-01-24T21:21:04.000Z | # pylint: disable=unused-import, missing-docstring
# -*- coding: utf-8 -*-
#
# ramstk.views.gtk3.preferences.view.py is part of the RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTK GTK3 Preferences Views."""
# Standard Library Imports
from datetime import datetime
from shutil import copyfile
from typing import Any, Dict
# Third Party Imports
import toml
from pubsub import pub
# RAMSTK Package Imports
from ramstk.configuration import RAMSTKUserConfiguration
from ramstk.logger import RAMSTKLogManager
from ramstk.utilities import integer_to_boolean
from ramstk.views.gtk3 import Gtk, _
from ramstk.views.gtk3.widgets import RAMSTKBaseView, RAMSTKLabel
# RAMSTK Local Imports
from . import (
GeneralPreferencesPanel,
LookFeelPreferencesPanel,
TreeLayoutPreferencesPanel,
)
class PreferencesDialog(RAMSTKBaseView):
"""Assistant to provide a GUI to set various RAMSTK config preferences.
RAMSTK preferences are stored in the RAMSTK Site database and the user's Site
configuration file and Program configuration file. Configurations preferences
are stored in Site.conf or RAMSTK.conf in each user's $HOME/.config/RAMSTK
directory and are applicable only to that specific user. Configuration
preferences are edited with the Preferences assistant.
"""
# Define private dict class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_tag = "preferences"
_pixbuf: bool = True
_tablabel: str = _("")
_tabtooltip: str = _("")
# Define public dict class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(
self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager
) -> None:
"""Initialize an instance of the Preferences assistant.
:param configuration: the RAMSTKUserConfiguration class instance.
:param logger: the RAMSTKLogManager class instance.
"""
super().__init__(configuration, logger)
# Initialize private dictionary attributes.
# Initialize private list attributes.
self._lst_callbacks = [self._do_request_update, self._cancel]
self._lst_icons = ["save", "cancel"]
self._lst_tooltips = [
_(
f"Save changes to RAMSTK program configuration file "
f"{configuration.RAMSTK_CONF_DIR}/RAMSTK.toml."
),
_("Quit the RAMSTK preferences assistant without saving."),
]
# Initialize private scalar attributes.
self._pnlGeneralPreferences: GeneralPreferencesPanel = GeneralPreferencesPanel()
self._pnlLookFeel: LookFeelPreferencesPanel = LookFeelPreferencesPanel()
self._pnlTreeViewLayout: TreeLayoutPreferencesPanel = (
TreeLayoutPreferencesPanel()
)
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.__make_ui()
pub.sendMessage(
"request_load_preferences",
configuration=self.RAMSTK_USER_CONFIGURATION,
)
def _cancel(self, __button: Gtk.Button) -> None:
"""Quit the preferences Gtk.Assistant().
:param __button: the Gtk.Button() that called this method.
:return: None
"""
_parent = self.get_parent()
_parent.destroy()
def _do_request_update(self, __button: Gtk.Button) -> None:
"""Request to update the user and program preferences.
:param __button: the Gtk.Button() that called this method.
:return: None
"""
_conf_file = self.RAMSTK_USER_CONFIGURATION.RAMSTK_CONF_DIR + "/RAMSTK.toml"
copyfile(_conf_file, _conf_file + "_bak")
self.RAMSTK_USER_CONFIGURATION.set_user_configuration()
try:
self._do_save_tree_layout()
# This happens when no format file was edited.
except FileNotFoundError:
pass
def _do_save_tree_layout(self) -> None:
"""Save the Module View RAMSTKTreeView() layout file.
:return: None
"""
_layout: Dict[str, Any] = {
"pixbuf": "False",
"defaulttitle": {},
"usertitle": {},
"datatype": {},
"position": {},
"widget": {},
"editable": {},
"visible": {},
"key": {},
}
copyfile(
self._pnlTreeViewLayout.fmt_file, self._pnlTreeViewLayout.fmt_file + "_bak"
)
# Get the format file for the Gtk.TreeView to be edited. Make a
# backup copy by appending the current date.
_now = datetime.today().strftime("%Y%m%d")
_bak_file = f"{self._pnlTreeViewLayout.fmt_file[:-5]}_bak_{_now}.toml"
copyfile(self._pnlTreeViewLayout.fmt_file, _bak_file)
# Open the format file for writing.
with open(self._pnlTreeViewLayout.fmt_file, "w", encoding="utf-8") as _file:
_model = self._pnlTreeViewLayout.tvwTreeView.get_model()
_row = _model.get_iter_first()
while _row is not None:
_key = _model.get_value(_row, 8)
_layout["defaulttitle"][_key] = _model.get_value(_row, 0)
_layout["usertitle"][_key] = _model.get_value(_row, 1)
_layout["position"][_key] = _model.get_value(_row, 2)
_layout["editable"][_key] = integer_to_boolean(
_model.get_value(_row, 3)
)
_layout["visible"][_key] = integer_to_boolean(_model.get_value(_row, 4))
_layout["datatype"][_key] = _model.get_value(_row, 5)
_layout["widget"][_key] = _model.get_value(_row, 6)
_layout["key"][_key] = _model.get_value(_row, 7)
_row = _model.iter_next(_row)
toml.dump(_layout, _file)
def __make_ui(self) -> None:
"""Build the user interface.
:return: None
:rtype: None
"""
super().do_make_layout()
_label = RAMSTKLabel(_("General & Directories"))
_label.do_set_properties(
height=30,
width=-1,
justify=Gtk.Justification.CENTER,
tooltip=_(
"Allows setting general preferences for the open RAMSTK program."
),
)
self._notebook.insert_page(
self._pnlGeneralPreferences, tab_label=_label, position=-1
)
_label = RAMSTKLabel(_("Look & Feel"))
_label.do_set_properties(
height=30,
width=-1,
justify=Gtk.Justification.CENTER,
tooltip=_("Allows setting color and other preferences."),
)
self._notebook.insert_page(self._pnlLookFeel, tab_label=_label, position=-1)
_label = RAMSTKLabel(_("Tree View Layout"))
_label.do_set_properties(
height=30,
width=-1,
justify=Gtk.Justification.CENTER,
tooltip=_("Allows setting tree view layout preferences."),
)
self._notebook.insert_page(
self._pnlTreeViewLayout, tab_label=_label, position=-1
)
self.pack_end(self._notebook, True, True, 0)
self.show_all()
| 33.822727 | 88 | 0.629351 | 6,551 | 0.880392 | 0 | 0 | 0 | 0 | 0 | 0 | 2,879 | 0.38691 |
ba0acde2a7dda4e8e83ad9fe76009f222b95e173 | 1,011 | py | Python | binascii_.py | hlovatt/PyBoardTypeshedGenerator | 1d133cab16ea5d558b03175e6fa48b4a23b76136 | [
"MIT"
] | 5 | 2020-07-26T08:48:39.000Z | 2021-09-13T19:19:37.000Z | binascii_.py | hlovatt/PyBoardTypeshedGenerator | 1d133cab16ea5d558b03175e6fa48b4a23b76136 | [
"MIT"
] | null | null | null | binascii_.py | hlovatt/PyBoardTypeshedGenerator | 1d133cab16ea5d558b03175e6fa48b4a23b76136 | [
"MIT"
] | 1 | 2020-11-07T22:37:44.000Z | 2020-11-07T22:37:44.000Z | """
Generate `pyi` from corresponding `rst` docs.
"""
import rst
from rst2pyi import RST2PyI
__author__ = rst.__author__
__copyright__ = rst.__copyright__
__license__ = rst.__license__
__version__ = "7.2.0" # Version set by https://github.com/hlovatt/tag2ver
def binascii(shed: RST2PyI) -> None:
shed.module(name="binascii", old="binary/ASCII conversions", end=r"Functions")
shed.def_(
old=r".. function:: hexlify(data, [sep])",
new="def hexlify(data: bytes, sep: str | bytes = ..., /) -> bytes",
indent=0,
)
shed.def_(
old=r".. function:: unhexlify(data)",
new="def unhexlify(data: str | bytes, /) -> bytes",
indent=0,
)
shed.def_(
old=r".. function:: a2b_base64(data)",
new="def a2b_base64(data: str | bytes, /) -> bytes",
indent=0,
)
shed.def_(
old=r".. function:: b2a_base64(data)",
new="def b2a_base64(data: bytes, /) -> bytes",
indent=0,
)
shed.write(u_also=True)
| 27.324324 | 82 | 0.593472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 490 | 0.484669 |
ba0ad1cae8bb275fda3b66ce6e02ab294e66f03f | 602 | py | Python | examples/deco_add_param.py | scholer/clize | bc15fc510fa6fb1cc27b1d27ea1b5653e61d2fff | [
"MIT"
] | 390 | 2015-04-05T01:16:35.000Z | 2022-03-30T02:13:52.000Z | examples/deco_add_param.py | scholer/clize | bc15fc510fa6fb1cc27b1d27ea1b5653e61d2fff | [
"MIT"
] | 67 | 2015-03-04T08:15:58.000Z | 2022-03-15T00:16:51.000Z | examples/deco_add_param.py | szaydel/clize | 84fef2080d7748dd36e465bc2048b48ed578d73f | [
"MIT"
] | 28 | 2015-01-11T04:37:08.000Z | 2021-07-07T08:20:20.000Z | from sigtools.wrappers import decorator
from clize import run
@decorator
def with_uppercase(wrapped, *args, uppercase=False, **kwargs):
"""
Formatting options:
:param uppercase: Print output in capitals
"""
ret = wrapped(*args, **kwargs)
if uppercase:
return str(ret).upper()
else:
return ret
@with_uppercase
def hello_world(name=None):
"""Says hello world
:param name: Who to say hello to
"""
if name is not None:
return 'Hello ' + name
else:
return 'Hello world!'
if __name__ == '__main__':
run(hello_world)
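# A sketch of the expected command line behaviour (typical clize semantics; output not verified here):
#   $ python deco_add_param.py John               -> Hello John
#   $ python deco_add_param.py John --uppercase   -> HELLO JOHN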
| 18.242424 | 62 | 0.629568 | 0 | 0 | 0 | 0 | 484 | 0.803987 | 0 | 0 | 180 | 0.299003 |
ba0ad8ee524c81c0738f3b1a9b9dc55e45851dd6 | 257 | py | Python | spikey/core/__init__.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | 4 | 2021-02-25T20:53:41.000Z | 2022-01-18T15:27:07.000Z | spikey/core/__init__.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | 5 | 2021-03-06T05:35:10.000Z | 2021-03-31T09:27:57.000Z | spikey/core/__init__.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | null | null | null | """
Core __init__.
"""
try:
from spikey.core.callback import ExperimentCallback, RLCallback, TDCallback
from spikey.core.training_loop import TrainingLoop, GenericLoop
except ImportError as e:
raise ImportError(f"core/__init__.py failed: {e}")
| 25.7 | 79 | 0.758755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.206226 |
ba0b04b4ada95663d8638daf31f1153f553fb61f | 912 | py | Python | src/abaqus/Calibration/CalibrationModel.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Calibration/CalibrationModel.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Calibration/CalibrationModel.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from ..Calibration.Calibration import Calibration
from ..Model.ModelBase import ModelBase
class CalibrationModel(ModelBase):
"""Abaqus creates a Model object named `Model-1` when a session is started.
Notes
-----
This object can be accessed by:
.. code-block:: python
mdb.models[name]
"""
def Calibration(self, name: str) -> Calibration:
"""This method creates a Calibration object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].Calibration
Parameters
----------
name
A String specifying the name of the new calibration.
Returns
-------
A Calibration object.
"""
self.calibrations[name] = calibration = Calibration(name)
return calibration
| 22.8 | 79 | 0.557018 | 819 | 0.898026 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.684211 |
ba0daa36434b219c2f65075a454c745560959d30 | 171 | py | Python | Python-Programming(icourse163.org)/7/7-2.py | takuron/Lesson | 226078131d240d07d3b0025eaed8cdeba998a2c4 | [
"MIT"
] | null | null | null | Python-Programming(icourse163.org)/7/7-2.py | takuron/Lesson | 226078131d240d07d3b0025eaed8cdeba998a2c4 | [
"MIT"
] | null | null | null | Python-Programming(icourse163.org)/7/7-2.py | takuron/Lesson | 226078131d240d07d3b0025eaed8cdeba998a2c4 | [
"MIT"
] | null | null | null | import math
x = int(input())
sinn = math.sin(math.radians(15))
son = math.exp(x)-5*x
mon = math.pow((x**2+1),0.5)
lnn = math.log(3*x)
print("%.10f"%(sinn+son/mon-lnn))
| 15.545455 | 33 | 0.608187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.040936 |
ba0f201ff461642700e5183d5d5d537e2f37ac6f | 332,638 | py | Python | kvmagent/kvmagent/plugins/vm_plugin.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 55 | 2017-02-10T07:55:21.000Z | 2021-09-01T00:59:36.000Z | kvmagent/kvmagent/plugins/vm_plugin.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 106 | 2017-02-13T09:58:27.000Z | 2022-02-15T09:51:48.000Z | kvmagent/kvmagent/plugins/vm_plugin.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 68 | 2017-02-13T11:02:01.000Z | 2021-12-16T11:02:01.000Z | '''
@author: Frank
'''
import contextlib
import os.path
import tempfile
import time
import datetime
import traceback
import xml.etree.ElementTree as etree
import re
import platform
import netaddr
import uuid
import shutil
import simplejson
import base64
import uuid
import json
import socket
from signal import SIGKILL
import syslog
import threading
import libvirt
import xml.dom.minidom as minidom
#from typing import List, Any, Union
from distutils.version import LooseVersion
import zstacklib.utils.ip as ip
import zstacklib.utils.ebtables as ebtables
import zstacklib.utils.iptables as iptables
import zstacklib.utils.lock as lock
from kvmagent import kvmagent
from kvmagent.plugins.baremetal_v2_gateway_agent import \
BaremetalV2GatewayAgentPlugin as BmV2GwAgent
from kvmagent.plugins.bmv2_gateway_agent import utils as bm_utils
from kvmagent.plugins.imagestore import ImageStoreClient
from zstacklib.utils import bash, plugin
from zstacklib.utils.bash import in_bash
from zstacklib.utils import jsonobject
from zstacklib.utils import lvm
from zstacklib.utils import shell
from zstacklib.utils import uuidhelper
from zstacklib.utils import xmlobject
from zstacklib.utils import misc
from zstacklib.utils import qemu_img
from zstacklib.utils import ebtables
from zstacklib.utils import vm_operator
from zstacklib.utils import pci
from zstacklib.utils.report import *
from zstacklib.utils.vm_plugin_queue_singleton import VmPluginQueueSingleton
from zstacklib.utils.libvirt_event_manager_singleton import LibvirtEventManager
from zstacklib.utils.libvirt_event_manager_singleton import LibvirtEventManagerSingleton
from distutils.version import LooseVersion
logger = log.get_logger(__name__)
HOST_ARCH = platform.machine()
DIST_NAME = platform.dist()[0]
ZS_XML_NAMESPACE = 'http://zstack.org'
etree.register_namespace('zs', ZS_XML_NAMESPACE)
GUEST_TOOLS_ISO_PATH = "/var/lib/zstack/guesttools/GuestTools.iso"
QMP_SOCKET_PATH = "/var/lib/libvirt/qemu/zstack"
PCI_ROM_PATH = "/var/lib/zstack/pcirom"
class RetryException(Exception):
pass
class NicTO(object):
def __init__(self):
self.mac = None
self.bridgeName = None
self.deviceId = None
class RemoteStorageFactory(object):
@staticmethod
def get_remote_storage(cmd):
if cmd.storageInfo and cmd.storageInfo.type == 'nfs':
return NfsRemoteStorage(cmd)
else:
return SshfsRemoteStorage(cmd)
class RemoteStorage(object):
def __init__(self, cmd):
self.mount_point = tempfile.mkdtemp(prefix="zs-backup")
def mount(self):
        raise NotImplementedError('function mount is not implemented')
    def umount(self):
        raise NotImplementedError('function umount is not implemented')
def clean(self):
linux.rmdir_if_empty(self.mount_point)
class NfsRemoteStorage(RemoteStorage):
def __init__(self, cmd):
super(NfsRemoteStorage, self).__init__(cmd)
self.options = cmd.storageInfo.options
self.url = cmd.storageInfo.url
relative_work_dir = cmd.uploadDir.replace(os.path.normpath(cmd.bsPath), '').lstrip(os.path.sep)
self.local_work_dir = os.path.join(self.mount_point, relative_work_dir)
self.remote_work_dir = os.path.join(self.url, relative_work_dir)
def mount(self):
linux.mount(self.url, self.mount_point, self.options)
def umount(self):
if linux.is_mounted(path=self.mount_point):
linux.umount(self.mount_point)
class SshfsRemoteStorage(RemoteStorage):
def __init__(self, cmd):
super(SshfsRemoteStorage, self).__init__(cmd)
self.bandwidth = cmd.networkWriteBandwidth
self.username = cmd.username
self.hostname = cmd.hostname
self.port = cmd.sshPort
self.password = cmd.password
self.dst_dir = cmd.uploadDir
self.vm_uuid = cmd.vmUuid
self.remote_work_dir = cmd.uploadDir
self.local_work_dir = self.mount_point
def mount(self):
if 0 != linux.sshfs_mount_with_vm_uuid(self.vm_uuid, self.username, self.hostname, self.port,
self.password, self.dst_dir, self.mount_point, self.bandwidth):
raise kvmagent.KvmError("failed to prepare backup space for [vm:%s]" % self.vm_uuid)
def umount(self):
for i in xrange(6):
if linux.fumount(self.mount_point, 5) == 0:
break
else:
time.sleep(5)
class StartVmCmd(kvmagent.AgentCommand):
@log.sensitive_fields("consolePassword")
def __init__(self):
super(StartVmCmd, self).__init__()
self.vmInstanceUuid = None
self.vmName = None
self.memory = None
self.cpuNum = None
self.cpuSpeed = None
self.bootDev = None
self.rootVolume = None
self.dataVolumes = []
self.cacheVolumes = []
self.isoPath = None
self.nics = []
self.timeout = None
self.dataIsoPaths = None
self.addons = None
self.useBootMenu = True
self.vmCpuModel = None
self.emulateHyperV = False
self.additionalQmp = True
self.isApplianceVm = False
self.systemSerialNumber = None
self.bootMode = None
self.consolePassword = None
class StartVmResponse(kvmagent.AgentResponse):
def __init__(self):
super(StartVmResponse, self).__init__()
class PciAddressInfo(object):
def __init__(self):
self.type = None
self.domain = None
self.bus = None
self.slot = None
self.function = None
class AttchNicResponse(kvmagent.AgentResponse):
def __init__(self):
super(AttchNicResponse, self).__init__()
self.pciAddress = PciAddressInfo()
class GetVncPortCmd(kvmagent.AgentCommand):
def __init__(self):
super(GetVncPortCmd, self).__init__()
self.vmUuid = None
class GetVncPortResponse(kvmagent.AgentResponse):
def __init__(self):
super(GetVncPortResponse, self).__init__()
self.port = None
self.protocol = None
self.vncPort = None
self.spicePort = None
self.spiceTlsPort = None
class ChangeCpuMemResponse(kvmagent.AgentResponse):
    def __init__(self):
        super(ChangeCpuMemResponse, self).__init__()
        self.cpuNum = None
        self.memorySize = None
        self.vmUuid = None
class IncreaseCpuResponse(kvmagent.AgentResponse):
def __init__(self):
super(IncreaseCpuResponse, self).__init__()
self.cpuNum = None
self.vmUuid = None
class IncreaseMemoryResponse(kvmagent.AgentResponse):
def __init__(self):
super(IncreaseMemoryResponse, self).__init__()
self.memorySize = None
self.vmUuid = None
class StopVmCmd(kvmagent.AgentCommand):
def __init__(self):
super(StopVmCmd, self).__init__()
self.uuid = None
self.timeout = None
class StopVmResponse(kvmagent.AgentResponse):
def __init__(self):
super(StopVmResponse, self).__init__()
class PauseVmCmd(kvmagent.AgentCommand):
def __init__(self):
super(PauseVmCmd, self).__init__()
self.uuid = None
self.timeout = None
class PauseVmResponse(kvmagent.AgentResponse):
def __init__(self):
super(PauseVmResponse, self).__init__()
class ResumeVmCmd(kvmagent.AgentCommand):
def __init__(self):
super(ResumeVmCmd, self).__init__()
self.uuid = None
self.timeout = None
class ResumeVmResponse(kvmagent.AgentResponse):
def __init__(self):
super(ResumeVmResponse, self).__init__()
class RebootVmCmd(kvmagent.AgentCommand):
def __init__(self):
super(RebootVmCmd, self).__init__()
self.uuid = None
self.timeout = None
class RebootVmResponse(kvmagent.AgentResponse):
def __init__(self):
super(RebootVmResponse, self).__init__()
class DestroyVmCmd(kvmagent.AgentCommand):
def __init__(self):
super(DestroyVmCmd, self).__init__()
self.uuid = None
class DestroyVmResponse(kvmagent.AgentResponse):
def __init__(self):
super(DestroyVmResponse, self).__init__()
class VmSyncCmd(kvmagent.AgentCommand):
def __init__(self):
super(VmSyncCmd, self).__init__()
class VmSyncResponse(kvmagent.AgentResponse):
def __init__(self):
super(VmSyncResponse, self).__init__()
self.states = None
self.vmInShutdowns = None
class AttachDataVolumeCmd(kvmagent.AgentCommand):
def __init__(self):
super(AttachDataVolumeCmd, self).__init__()
self.volume = None
self.uuid = None
class AttachDataVolumeResponse(kvmagent.AgentResponse):
def __init__(self):
super(AttachDataVolumeResponse, self).__init__()
class DetachDataVolumeCmd(kvmagent.AgentCommand):
def __init__(self):
super(DetachDataVolumeCmd, self).__init__()
self.volume = None
self.uuid = None
class DetachDataVolumeResponse(kvmagent.AgentResponse):
def __init__(self):
super(DetachDataVolumeResponse, self).__init__()
class MigrateVmResponse(kvmagent.AgentResponse):
def __init__(self):
super(MigrateVmResponse, self).__init__()
class TakeSnapshotResponse(kvmagent.AgentResponse):
def __init__(self):
super(TakeSnapshotResponse, self).__init__()
self.newVolumeInstallPath = None
self.snapshotInstallPath = None
self.size = None
class TakeVolumeBackupCommand(kvmagent.AgentCommand):
@log.sensitive_fields("password")
def __init__(self):
super(TakeVolumeBackupCommand, self).__init__()
self.hostname = None
self.username = None
self.password = None
self.sshPort = 22
self.bsPath = None
self.uploadDir = None
self.vmUuid = None
self.volume = None
self.bitmap = None
self.lastBackup = None
self.networkWriteBandwidth = 0L
self.volumeWriteBandwidth = 0L
self.maxIncremental = 0
self.mode = None
self.storageInfo = None
class TakeVolumeBackupResponse(kvmagent.AgentResponse):
def __init__(self):
super(TakeVolumeBackupResponse, self).__init__()
self.backupFile = None
self.parentInstallPath = None
self.bitmap = None
class VolumeBackupInfo(object):
def __init__(self, deviceId, bitmap, backupFile, parentInstallPath):
self.deviceId = deviceId
self.bitmap = bitmap
self.backupFile = backupFile
self.parentInstallPath = parentInstallPath
class TakeVolumesBackupsCommand(kvmagent.AgentCommand):
@log.sensitive_fields("password")
def __init__(self):
super(TakeVolumesBackupsCommand, self).__init__()
self.hostname = None
self.username = None
self.password = None
self.sshPort = 22
self.bsPath = None
self.uploadDir = None
self.vmUuid = None
self.backupInfos = []
self.deviceIds = [] # type:list[int]
self.networkWriteBandwidth = 0L
self.volumeWriteBandwidth = 0L
self.maxIncremental = 0
self.mode = None
self.volumes = []
self.storageInfo = None
class TakeVolumesBackupsResponse(kvmagent.AgentResponse):
def __init__(self):
super(TakeVolumesBackupsResponse, self).__init__()
self.backupInfos = [] # type: list[VolumeBackupInfo]
class TakeSnapshotsCmd(kvmagent.AgentCommand):
snapshotJobs = None # type: list[VolumeSnapshotJobStruct]
def __init__(self):
super(TakeSnapshotsCmd, self).__init__()
self.snapshotJobs = []
class TakeSnapshotsResponse(kvmagent.AgentResponse):
snapshots = None # type: List[VolumeSnapshotResultStruct]
def __init__(self):
super(TakeSnapshotsResponse, self).__init__()
self.snapshots = []
class CancelBackupJobsCmd(kvmagent.AgentCommand):
def __init__(self):
super(CancelBackupJobsCmd, self).__init__()
self.vmUuid = None
class CancelBackupJobsResponse(kvmagent.AgentResponse):
def __init__(self):
super(CancelBackupJobsResponse, self).__init__()
class MergeSnapshotRsp(kvmagent.AgentResponse):
def __init__(self):
super(MergeSnapshotRsp, self).__init__()
class LogoutIscsiTargetRsp(kvmagent.AgentResponse):
def __init__(self):
super(LogoutIscsiTargetRsp, self).__init__()
class LoginIscsiTargetCmd(kvmagent.AgentCommand):
@log.sensitive_fields("chapPassword")
def __init__(self):
super(LoginIscsiTargetCmd, self).__init__()
self.hostname = None
self.port = None # type:int
self.target = None
self.chapUsername = None
self.chapPassword = None
class LoginIscsiTargetRsp(kvmagent.AgentResponse):
def __init__(self):
super(LoginIscsiTargetRsp, self).__init__()
class ReportVmStateCmd(object):
def __init__(self):
self.hostUuid = None
self.vmUuid = None
self.vmState = None
class ReportVmShutdownEventCmd(object):
def __init__(self):
self.vmUuid = None
class ReportVmRebootEventCmd(object):
def __init__(self):
self.vmUuid = None
class CheckVmStateRsp(kvmagent.AgentResponse):
def __init__(self):
super(CheckVmStateRsp, self).__init__()
self.states = {}
class CheckColoVmStateRsp(kvmagent.AgentResponse):
def __init__(self):
super(CheckColoVmStateRsp, self).__init__()
self.state = None
self.mode = None
class ChangeVmPasswordCmd(kvmagent.AgentCommand):
@log.sensitive_fields("accountPerference.accountPassword")
def __init__(self):
super(ChangeVmPasswordCmd, self).__init__()
self.accountPerference = AccountPerference() # type:AccountPerference
self.timeout = 0L
class ChangeVmPasswordRsp(kvmagent.AgentResponse):
def __init__(self):
super(ChangeVmPasswordRsp, self).__init__()
self.accountPerference = None
class AccountPerference(object):
def __init__(self):
self.userAccount = None
self.accountPassword = None
self.vmUuid = None
class ReconnectMeCmd(object):
def __init__(self):
self.hostUuid = None
self.reason = None
class FailOverCmd(object):
def __init__(self):
self.vmInstanceUuid = None
self.hostUuid = None
self.reason = None
self.primaryVmFailure = None
class HotPlugPciDeviceCommand(kvmagent.AgentCommand):
def __init__(self):
super(HotPlugPciDeviceCommand, self).__init__()
self.pciDeviceAddress = None
self.vmUuid = None
class HotPlugPciDeviceRsp(kvmagent.AgentResponse):
def __init__(self):
super(HotPlugPciDeviceRsp, self).__init__()
class HotUnplugPciDeviceCommand(kvmagent.AgentCommand):
def __init__(self):
super(HotUnplugPciDeviceCommand, self).__init__()
self.pciDeviceAddress = None
self.vmUuid = None
class HotUnplugPciDeviceRsp(kvmagent.AgentResponse):
def __init__(self):
super(HotUnplugPciDeviceRsp, self).__init__()
class AttachPciDeviceToHostCommand(kvmagent.AgentCommand):
def __init__(self):
super(AttachPciDeviceToHostCommand, self).__init__()
self.pciDeviceAddress = None
class AttachPciDeviceToHostRsp(kvmagent.AgentResponse):
def __init__(self):
super(AttachPciDeviceToHostRsp, self).__init__()
class DetachPciDeviceFromHostCommand(kvmagent.AgentCommand):
def __init__(self):
super(DetachPciDeviceFromHostCommand, self).__init__()
self.pciDeviceAddress = None
class DetachPciDeviceFromHostRsp(kvmagent.AgentResponse):
def __init__(self):
super(DetachPciDeviceFromHostRsp, self).__init__()
class KvmAttachUsbDeviceRsp(kvmagent.AgentResponse):
def __init__(self):
super(KvmAttachUsbDeviceRsp, self).__init__()
class KvmDetachUsbDeviceRsp(kvmagent.AgentResponse):
def __init__(self):
super(KvmDetachUsbDeviceRsp, self).__init__()
class ReloadRedirectUsbRsp(kvmagent.AgentResponse):
def __init__(self):
super(ReloadRedirectUsbRsp, self).__init__()
class CheckMountDomainRsp(kvmagent.AgentResponse):
def __init__(self):
super(CheckMountDomainRsp, self).__init__()
self.active = False
class KvmResizeVolumeCommand(kvmagent.AgentCommand):
def __init__(self):
super(KvmResizeVolumeCommand, self).__init__()
self.vmUuid = None
self.size = None
self.deviceId = None
class KvmResizeVolumeRsp(kvmagent.AgentResponse):
def __init__(self):
super(KvmResizeVolumeRsp, self).__init__()
class UpdateVmPriorityRsp(kvmagent.AgentResponse):
def __init__(self):
super(UpdateVmPriorityRsp, self).__init__()
class BlockStreamResponse(kvmagent.AgentResponse):
def __init__(self):
super(BlockStreamResponse, self).__init__()
class AttachGuestToolsIsoToVmCmd(kvmagent.AgentCommand):
def __init__(self):
super(AttachGuestToolsIsoToVmCmd, self).__init__()
self.vmInstanceUuid = None
self.needTempDisk = None
class AttachGuestToolsIsoToVmRsp(kvmagent.AgentResponse):
def __init__(self):
super(AttachGuestToolsIsoToVmRsp, self).__init__()
class DetachGuestToolsIsoFromVmCmd(kvmagent.AgentCommand):
def __init__(self):
super(DetachGuestToolsIsoFromVmCmd, self).__init__()
self.vmInstanceUuid = None
class DetachGuestToolsIsoFromVmRsp(kvmagent.AgentResponse):
def __init__(self):
super(DetachGuestToolsIsoFromVmRsp, self).__init__()
class IsoTo(object):
def __init__(self):
super(IsoTo, self).__init__()
self.path = None
self.imageUuid = None
self.deviceId = None
class AttachIsoCmd(object):
def __init__(self):
super(AttachIsoCmd, self).__init__()
self.iso = None
self.vmUuid = None
class DetachIsoCmd(object):
def __init__(self):
super(DetachIsoCmd, self).__init__()
self.vmUuid = None
self.deviceId = None
class GetVmGuestToolsInfoCmd(kvmagent.AgentCommand):
def __init__(self):
super(GetVmGuestToolsInfoCmd, self).__init__()
self.vmInstanceUuid = None
class GetVmGuestToolsInfoRsp(kvmagent.AgentResponse):
def __init__(self):
super(GetVmGuestToolsInfoRsp, self).__init__()
self.version = None
self.status = None
class GetVmFirstBootDeviceCmd(kvmagent.AgentCommand):
def __init__(self):
super(GetVmFirstBootDeviceCmd, self).__init__()
self.uuid = None
class GetVmFirstBootDeviceRsp(kvmagent.AgentResponse):
def __init__(self):
super(GetVmFirstBootDeviceRsp, self).__init__()
self.firstBootDevice = None
class FailColoPrimaryVmCmd(kvmagent.AgentCommand):
@log.sensitive_fields("targetHostPassword")
def __init__(self):
super(FailColoPrimaryVmCmd, self).__init__()
self.vmInstanceUuid = None
self.targetHostIp = None
self.targetHostPort = None
self.targetHostPassword = None
class GetVmDeviceAddressRsp(kvmagent.AgentResponse):
def __init__(self):
super(GetVmDeviceAddressRsp, self).__init__()
self.addresses = {} # type:map[str, list[VmDeviceAddress]]
class VmDeviceAddress(object):
def __init__(self, uuid, device_type, address_type, address):
self.uuid = uuid
self.deviceType = device_type
self.addressType = address_type
self.address = address
class VncPortIptableRule(object):
def __init__(self):
self.host_ip = None
self.port = None
self.vm_internal_id = None
def _make_chain_name(self):
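        # e.g. vm_internal_id "42" -> chain name "vm-42-vnc"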
return "vm-%s-vnc" % self.vm_internal_id
@lock.file_lock('/run/xtables.lock')
def apply(self):
assert self.host_ip is not None
assert self.port is not None
assert self.vm_internal_id is not None
ipt = iptables.from_iptables_save()
chain_name = self._make_chain_name()
current_ip = linux.get_host_by_name(self.host_ip)
# get ipv4 subnet
        # splitlines()[0] would raise IndexError on empty output, so check the list first
        current_ip_lines = shell.call('ip -o -f inet addr show | awk \'/scope global/ {print $4}\' | fgrep -w %s' % current_ip).splitlines()
        if not current_ip_lines:
            err = 'cannot get host ip with netmask for %s' % self.host_ip
            logger.warn(err)
            raise kvmagent.KvmError(err)
        current_ip_with_netmask = current_ip_lines[0]
ipt.add_rule('-A INPUT -p tcp -m tcp --dport %s -j %s' % (self.port, chain_name))
ipt.add_rule('-A %s -d %s -j ACCEPT' % (chain_name, current_ip_with_netmask))
ipt.add_rule('-A %s ! -d %s -j REJECT --reject-with icmp-host-prohibited' % (chain_name, current_ip_with_netmask))
ipt.iptable_restore()
@lock.file_lock('/run/xtables.lock')
def delete(self):
assert self.vm_internal_id is not None
ipt = iptables.from_iptables_save()
chain_name = self._make_chain_name()
ipt.delete_chain(chain_name)
ipt.iptable_restore()
def find_vm_internal_ids(self, vms):
internal_ids = []
namespace_used = is_namespace_used()
for vm in vms:
if namespace_used:
vm_id_node = find_zstack_metadata_node(etree.fromstring(vm.domain_xml), 'internalId')
if vm_id_node is None:
continue
vm_id = vm_id_node.text
else:
if not vm.domain_xmlobject.has_element('metadata.internalId'):
continue
vm_id = vm.domain_xmlobject.metadata.internalId.text_
if vm_id:
internal_ids.append(vm_id)
return internal_ids
@lock.file_lock('/run/xtables.lock')
def delete_stale_chains(self):
ipt = iptables.from_iptables_save()
tbl = ipt.get_table()
if not tbl:
ipt.iptable_restore()
return
vms = get_running_vms()
internal_ids = self.find_vm_internal_ids(vms)
# delete all vnc chains
chains = tbl.children[:]
for chain in chains:
if 'vm' in chain.name and 'vnc' in chain.name:
vm_internal_id = chain.name.split('-')[1]
if vm_internal_id not in internal_ids:
ipt.delete_chain(chain.name)
logger.debug('deleted a stale VNC iptable chain[%s]' % chain.name)
ipt.iptable_restore()
def e(parent, tag, value=None, attrib={}, usenamesapce = False):
if usenamesapce:
tag = '{%s}%s' % (ZS_XML_NAMESPACE, tag)
el = etree.SubElement(parent, tag, attrib)
if value:
el.text = value
return el
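# Illustrative use of the e() helper (the element names below are just an example):
#   disk = etree.Element('disk', {'type': 'file', 'device': 'disk'})
#   e(disk, 'target', None, {'dev': 'vda', 'bus': 'virtio'})
# yields <disk type='file' device='disk'><target dev='vda' bus='virtio'/></disk>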
def find_namespace_node(root, path, name):
ns = {'zs': ZS_XML_NAMESPACE}
ps = path.split('.')
cnode = root
for p in ps:
cnode = cnode.find(p)
if cnode is None:
return None
return cnode.find('zs:%s' % name, ns)
def find_zstack_metadata_node(root, name):
zs = find_namespace_node(root, 'metadata', 'zstack')
if zs is None:
return None
return zs.find(name)
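# The two lookups above expect domain XML roughly shaped like this (a sketch; values are examples):
#   <metadata>
#     <zs:zstack xmlns:zs="http://zstack.org"><internalId>1</internalId>...</zs:zstack>
#   </metadata>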
def find_domain_cdrom_address(domain_xml, target_dev):
domain_xmlobject = xmlobject.loads(domain_xml)
disks = domain_xmlobject.devices.get_children_nodes()['disk']
for d in disks:
if d.device_ != 'cdrom':
continue
if d.get_child_node('target').dev_ != target_dev:
continue
return d.get_child_node('address')
return None
def find_domain_first_boot_device(domain_xml):
domain_xmlobject = xmlobject.loads(domain_xml)
disks = domain_xmlobject.devices.get_child_node_as_list('disk')
ifaces = domain_xmlobject.devices.get_child_node_as_list('interface')
for d in disks:
if d.get_child_node('boot') is None:
continue
if d.device_ == 'disk' and d.get_child_node('boot').order_ == '1':
return "HardDisk"
if d.device_ == 'cdrom' and d.get_child_node('boot').order_ == '1':
return "CdRom"
for i in ifaces:
if i.get_child_node('boot') is None:
continue
if i.get_child_node('boot').order_ == '1':
return "Network"
devs = domain_xmlobject.os.get_child_node_as_list('boot')
if devs and devs[0].dev_ == 'cdrom':
return "CdRom"
return "HardDisk"
def compare_version(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
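# Examples (cmp() semantics, Python 2): compare_version('1.3.3', '1.3') == 1,
# compare_version('1.3.0', '1.3') == 0 and compare_version('1.2.9', '1.3') == -1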
LIBVIRT_VERSION = linux.get_libvirt_version()
LIBVIRT_MAJOR_VERSION = LIBVIRT_VERSION.split('.')[0]
QEMU_VERSION = linux.get_qemu_version()
def is_namespace_used():
return compare_version(LIBVIRT_VERSION, '1.3.3') >= 0
def is_hv_freq_supported():
return compare_version(QEMU_VERSION, '2.12.0') >= 0
@linux.with_arch(todo_list=['x86_64'])
def is_ioapic_supported():
return compare_version(LIBVIRT_VERSION, '3.4.0') >= 0
def is_kylin402():
zstack_release = linux.read_file('/etc/zstack-release')
if zstack_release is None:
return False
return "kylin402" in zstack_release.splitlines()[0]
def is_spiceport_driver_supported():
    # qemu-system-aarch64 does not support the 'spiceport' char driver
return shell.run("%s -h | grep 'chardev spiceport'" % kvmagent.get_qemu_path()) == 0
def is_virtual_machine():
product_name = shell.call("dmidecode -s system-product-name").strip()
return product_name == "KVM Virtual Machine" or product_name == "KVM"
def get_domain_type():
return "qemu" if HOST_ARCH == "aarch64" and is_virtual_machine() else "kvm"
def get_gic_version(cpu_num):
kernel_release = platform.release().split("-")[0]
if is_kylin402() and cpu_num <= 8 and LooseVersion(kernel_release) < LooseVersion('4.15.0'):
return 2
# Occasionally, libvirt might fail to list VM ...
def get_console_without_libvirt(vmUuid):
output = bash.bash_o("""ps x | awk '/qemu[-]kvm.*%s/{print $1, index($0, " -vnc ")}'""" % vmUuid).splitlines()
if len(output) != 1:
return None, None, None, None
pid, idx = output[0].split()
output = bash.bash_o(
"""lsof -p %s -aPi4 | awk '$8 == "TCP" { n=split($9,a,":"); print a[n] }'""" % pid).splitlines()
if len(output) < 1:
logger.warn("get_port_without_libvirt: no port found")
return None, None, None, None
    # VNC exposes a single port; SPICE may expose one or two ports (plain and TLS),
    # and VNC combined with SPICE may expose two or three ports in total.
    output.sort()  # sort in place; list.sort() returns None, so never rebind output to it
if len(output) == 1 and int(idx) == 0:
protocol = "spice"
return protocol, None, int(output[0]), None
if len(output) == 1 and int(idx) != 0:
protocol = "vnc"
return protocol, int(output[0]), None, None
if len(output) == 2 and int(idx) == 0:
protocol = "spice"
return protocol, None, int(output[0]), int(output[1])
if len(output) == 2 and int(idx) != 0:
protocol = "vncAndSpice"
return protocol, int(output[0]), int(output[1]), None
if len(output) == 3:
protocol = "vncAndSpice"
return protocol, int(output[0]), int(output[1]), int(output[2])
logger.warn("get_port_without_libvirt: more than 3 ports")
return None, None, None, None
def check_vdi_port(vncPort, spicePort, spiceTlsPort):
if vncPort is None and spicePort is None and spiceTlsPort is None:
return False
if vncPort is not None and vncPort <= 0:
return False
if spicePort is not None and spicePort <= 0:
return False
if spiceTlsPort is not None and spiceTlsPort <= 0:
return False
return True
# get domain/bus/slot/function from pci device address
def parse_pci_device_address(addr):
domain = '0000' if len(addr.split(":")) == 2 else addr.split(":")[0]
bus = addr.split(":")[-2]
slot = addr.split(":")[-1].split(".")[0]
function = addr.split(".")[-1]
return domain, bus, slot, function
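# Examples: parse_pci_device_address("0000:03:00.1") -> ("0000", "03", "00", "1")
#           parse_pci_device_address("03:00.1")      -> ("0000", "03", "00", "1")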
def get_machineType(machine_type):
if HOST_ARCH == "aarch64":
return "virt"
return machine_type if machine_type else "pc"
def get_sgio_value():
device_name = [x for x in os.listdir("/sys/block") if not x.startswith("loop")][0]
return "unfiltered" if os.path.isfile("/sys/block/{}/queue/unpriv_sgio".format(device_name)) else "filtered"
class LibvirtAutoReconnect(object):
conn = libvirt.open('qemu:///system')
if not conn:
raise Exception('unable to get libvirt connection')
evtMgr = LibvirtEventManagerSingleton()
libvirt_event_callbacks = {}
def __init__(self, func):
self.func = func
self.exception = None
@staticmethod
def add_libvirt_callback(id, cb):
cbs = LibvirtAutoReconnect.libvirt_event_callbacks.get(id, None)
if cbs is None:
cbs = []
LibvirtAutoReconnect.libvirt_event_callbacks[id] = cbs
cbs.append(cb)
@staticmethod
def register_libvirt_callbacks():
def reboot_callback(conn, dom, opaque):
cbs = LibvirtAutoReconnect.libvirt_event_callbacks.get(libvirt.VIR_DOMAIN_EVENT_ID_REBOOT)
if not cbs:
return
for cb in cbs:
try:
cb(conn, dom, opaque)
except:
content = traceback.format_exc()
logger.warn(content)
LibvirtAutoReconnect.conn.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, reboot_callback,
None)
def lifecycle_callback(conn, dom, event, detail, opaque):
cbs = LibvirtAutoReconnect.libvirt_event_callbacks.get(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE)
if not cbs:
return
for cb in cbs:
try:
cb(conn, dom, event, detail, opaque)
except:
content = traceback.format_exc()
logger.warn(content)
LibvirtAutoReconnect.conn.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
lifecycle_callback, None)
def libvirtClosedCallback(conn, reason, opaque):
reasonStrings = (
"Error", "End-of-file", "Keepalive", "Client",
)
logger.debug("got libvirt closed callback: %s: %s" % (conn.getURI(), reasonStrings[reason]))
LibvirtAutoReconnect.conn.registerCloseCallback(libvirtClosedCallback, None)
# NOTE: the keepalive doesn't work on some libvirtd even the versions are the same
# the error is like "the caller doesn't support keepalive protocol; perhaps it's missing event loop implementation"
# def start_keep_alive(_):
# try:
# LibvirtAutoReconnect.conn.setKeepAlive(5, 3)
# return True
# except Exception as e:
# logger.warn('unable to start libvirt keep-alive, %s' % str(e))
# return False
#
# if not linux.wait_callback_success(start_keep_alive, timeout=5, interval=0.5):
# raise Exception('unable to start libvirt keep-alive after 5 seconds, see the log for detailed error')
@lock.lock('libvirt-reconnect')
def _reconnect(self):
def test_connection():
try:
LibvirtAutoReconnect.conn.getLibVersion()
VmPlugin._reload_ceph_secret_keys()
return None
except libvirt.libvirtError as ex:
return ex
ex = test_connection()
if not ex:
# the connection is ok
return
# 2nd version: 2015
logger.warn("the libvirt connection is broken, there is no safeway to auto-reconnect without fd leak, we"
" will ask the mgmt server to reconnect us after self quit")
_stop_world()
# old_conn = LibvirtAutoReconnect.conn
# LibvirtAutoReconnect.conn = libvirt.open('qemu:///system')
# if not LibvirtAutoReconnect.conn:
# raise Exception('unable to get a libvirt connection')
#
# for cid in LibvirtAutoReconnect.callback_id:
# logger.debug("remove libvirt event callback[id:%s]" % cid)
# old_conn.domainEventDeregisterAny(cid)
#
# # stop old event manager
# LibvirtAutoReconnect.evtMgr.stop()
# # create a new event manager
# LibvirtAutoReconnect.evtMgr = LibvirtEventManager()
# LibvirtAutoReconnect.register_libvirt_callbacks()
#
# # try to close the old connection anyway
# try:
# old_conn.close()
# except Exception as ee:
# logger.warn('unable to close an old libvirt exception, %s' % str(ee))
# finally:
# del old_conn
#
# ex = test_connection()
# if ex:
# # unable to reconnect, raise the error
# raise Exception('unable to get a libvirt connection, %s' % str(ex))
#
# logger.debug('successfully reconnected to the libvirt')
def __call__(self, *args, **kwargs):
try:
return self.func(LibvirtAutoReconnect.conn)
except libvirt.libvirtError as ex:
err = str(ex)
if 'client socket is closed' in err or 'Broken pipe' in err or 'invalid connection' in err:
logger.debug('socket to the libvirt is broken[%s], try reconnecting' % err)
self._reconnect()
return self.func(LibvirtAutoReconnect.conn)
else:
raise
class IscsiLogin(object):
def __init__(self):
self.server_hostname = None
self.server_port = None
self.target = None
self.chap_username = None
self.chap_password = None
self.lun = 1
@lock.lock('iscsiadm')
def login(self):
assert self.server_hostname, "hostname cannot be None"
assert self.server_port, "port cannot be None"
assert self.target, "target cannot be None"
device_path = os.path.join('/dev/disk/by-path/', 'ip-%s:%s-iscsi-%s-lun-%s' % (
self.server_hostname, self.server_port, self.target, self.lun))
shell.call('iscsiadm -m discovery -t sendtargets -p %s:%s' % (self.server_hostname, self.server_port))
if self.chap_username and self.chap_password:
shell.call(
'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.authmethod --value=CHAP' % (
self.target, self.server_hostname, self.server_port))
shell.call(
'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.username --value=%s' % (
self.target, self.server_hostname, self.server_port, self.chap_username))
shell.call(
'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.password --value=%s' % (
self.target, self.server_hostname, self.server_port, self.chap_password))
shell.call('iscsiadm --mode node --targetname "%s" -p %s:%s --login' % (
self.target, self.server_hostname, self.server_port))
def wait_device_to_show(_):
return os.path.exists(device_path)
if not linux.wait_callback_success(wait_device_to_show, timeout=30, interval=0.5):
raise Exception('ISCSI device[%s] is not shown up after 30s' % device_path)
return device_path
class BlkIscsi(object):
def __init__(self):
self.is_cdrom = None
self.volume_uuid = None
self.chap_username = None
self.chap_password = None
self.device_letter = None
self.addressBus = None
self.addressUnit = None
self.server_hostname = None
self.server_port = None
self.target = None
self.lun = None
def _login_portal(self):
login = IscsiLogin()
login.server_hostname = self.server_hostname
login.server_port = self.server_port
login.target = self.target
login.chap_username = self.chap_username
login.chap_password = self.chap_password
return login.login()
def to_xmlobject(self):
# type: () -> etree.Element
device_path = self._login_portal()
if self.is_cdrom:
root = etree.Element('disk', {'type': 'block', 'device': 'cdrom'})
e(root, 'driver', attrib={'name': 'qemu', 'type': 'raw', 'cache': 'none'})
e(root, 'source', attrib={'dev': device_path})
e(root, 'target', attrib={'dev': self.device_letter})
if self.addressBus and self.addressUnit:
e(root, 'address', None,{'type' : 'drive', 'bus' : self.addressBus, 'unit' : self.addressUnit})
else:
root = etree.Element('disk', {'type': 'block', 'device': 'lun'})
e(root, 'driver', attrib={'name': 'qemu', 'type': 'raw', 'cache': 'none', 'discard':'unmap'})
e(root, 'source', attrib={'dev': device_path})
e(root, 'target', attrib={'dev': 'sd%s' % self.device_letter})
return root
@staticmethod
@lock.lock('iscsiadm')
def logout_portal(dev_path):
if not os.path.exists(dev_path):
return
device = os.path.basename(dev_path)
portal = device[3:device.find('-iscsi')]
target = device[device.find('iqn'):device.find('-lun')]
try:
shell.call('iscsiadm -m node --targetname "%s" --portal "%s" --logout' % (target, portal))
except Exception as e:
logger.warn('failed to logout device[%s], %s' % (dev_path, str(e)))
class IsoCeph(object):
def __init__(self):
self.iso = None
def to_xmlobject(self, target_dev, target_bus_type, bus=None, unit=None, bootOrder=None):
disk = etree.Element('disk', {'type': 'network', 'device': 'cdrom'})
source = e(disk, 'source', None, {'name': self.iso.path.lstrip('ceph:').lstrip('//'), 'protocol': 'rbd'})
if self.iso.secretUuid:
auth = e(disk, 'auth', attrib={'username': 'zstack'})
e(auth, 'secret', attrib={'type': 'ceph', 'uuid': self.iso.secretUuid})
for minfo in self.iso.monInfo:
e(source, 'host', None, {'name': minfo.hostname, 'port': str(minfo.port)})
e(disk, 'target', None, {'dev': target_dev, 'bus': target_bus_type})
if bus and unit:
e(disk, 'address', None, {'type': 'drive', 'bus': bus, 'unit': unit})
e(disk, 'readonly', None)
if bootOrder is not None and bootOrder > 0:
e(disk, 'boot', None, {'order': str(bootOrder)})
return disk
class BlkCeph(object):
def __init__(self):
self.volume = None
self.dev_letter = None
self.bus_type = None
def to_xmlobject(self):
disk = etree.Element('disk', {'type': 'network', 'device': 'disk'})
source = e(disk, 'source', None,
{'name': self.volume.installPath.lstrip('ceph:').lstrip('//'), 'protocol': 'rbd'})
if self.volume.secretUuid:
auth = e(disk, 'auth', attrib={'username': 'zstack'})
e(auth, 'secret', attrib={'type': 'ceph', 'uuid': self.volume.secretUuid})
for minfo in self.volume.monInfo:
e(source, 'host', None, {'name': minfo.hostname, 'port': str(minfo.port)})
dev_format = Vm._get_disk_target_dev_format(self.bus_type)
e(disk, 'target', None, {'dev': dev_format % self.dev_letter, 'bus': self.bus_type})
if self.volume.physicalBlockSize:
e(disk, 'blockio', None, {'physical_block_size': str(self.volume.physicalBlockSize)})
return disk
class VirtioCeph(object):
def __init__(self):
self.volume = None
self.dev_letter = None
def to_xmlobject(self):
disk = etree.Element('disk', {'type': 'network', 'device': 'disk'})
source = e(disk, 'source', None,
{'name': self.volume.installPath.lstrip('ceph:').lstrip('//'), 'protocol': 'rbd'})
if self.volume.secretUuid:
auth = e(disk, 'auth', attrib={'username': 'zstack'})
e(auth, 'secret', attrib={'type': 'ceph', 'uuid': self.volume.secretUuid})
for minfo in self.volume.monInfo:
e(source, 'host', None, {'name': minfo.hostname, 'port': str(minfo.port)})
e(disk, 'target', None, {'dev': 'vd%s' % self.dev_letter, 'bus': 'virtio'})
if self.volume.physicalBlockSize:
e(disk, 'blockio', None, {'physical_block_size': str(self.volume.physicalBlockSize)})
return disk
class VirtioSCSICeph(object):
def __init__(self):
self.volume = None
self.dev_letter = None
def to_xmlobject(self):
disk = etree.Element('disk', {'type': 'network', 'device': 'disk'})
source = e(disk, 'source', None,
{'name': self.volume.installPath.lstrip('ceph:').lstrip('//'), 'protocol': 'rbd'})
if self.volume.secretUuid:
auth = e(disk, 'auth', attrib={'username': 'zstack'})
e(auth, 'secret', attrib={'type': 'ceph', 'uuid': self.volume.secretUuid})
for minfo in self.volume.monInfo:
e(source, 'host', None, {'name': minfo.hostname, 'port': str(minfo.port)})
e(disk, 'target', None, {'dev': 'sd%s' % self.dev_letter, 'bus': 'scsi'})
e(disk, 'wwn', self.volume.wwn)
if self.volume.shareable:
e(disk, 'driver', None, {'name': 'qemu', 'type': 'raw', 'cache': 'none'})
e(disk, 'shareable')
if self.volume.physicalBlockSize:
e(disk, 'blockio', None, {'physical_block_size': str(self.volume.physicalBlockSize)})
return disk
class VirtioIscsi(object):
def __init__(self):
self.volume_uuid = None
self.chap_username = None
self.chap_password = None
self.device_letter = None
self.server_hostname = None
self.server_port = None
self.target = None
self.lun = None
def to_xmlobject(self):
root = etree.Element('disk', {'type': 'network', 'device': 'disk'})
e(root, 'driver', attrib={'name': 'qemu', 'type': 'raw', 'cache': 'none', 'discard':'unmap'})
if self.chap_username and self.chap_password:
auth = e(root, 'auth', attrib={'username': self.chap_username})
e(auth, 'secret', attrib={'type': 'iscsi', 'uuid': self._get_secret_uuid()})
source = e(root, 'source', attrib={'protocol': 'iscsi', 'name': '%s/%s' % (self.target, self.lun)})
e(source, 'host', attrib={'name': self.server_hostname, 'port': self.server_port})
e(root, 'target', attrib={'dev': 'sd%s' % self.device_letter, 'bus': 'scsi'})
e(root, 'shareable')
return root
def _get_secret_uuid(self):
root = etree.Element('secret', {'ephemeral': 'yes', 'private': 'yes'})
e(root, 'description', self.volume_uuid)
usage = e(root, 'usage', attrib={'type': 'iscsi'})
e(usage, 'target', self.target)
xml = etree.tostring(root)
logger.debug('create secret for virtio-iscsi volume:\n%s\n' % xml)
@LibvirtAutoReconnect
def call_libvirt(conn):
return conn.secretDefineXML(xml)
secret = call_libvirt()
secret.setValue(self.chap_password)
return secret.UUIDString()
@linux.retry(times=3, sleep_time=1)
def get_connect(src_host_ip):
conn = libvirt.open('qemu+tcp://{0}/system'.format(src_host_ip))
if conn is None:
logger.warn('unable to connect qemu on host {0}'.format(src_host_ip))
raise kvmagent.KvmError('unable to connect qemu on host %s' % (src_host_ip))
return conn
def get_vm_by_uuid(uuid, exception_if_not_existing=True, conn=None):
try:
# libvirt may not be able to find a VM when under a heavy workload, we re-try here
@LibvirtAutoReconnect
def call_libvirt(conn):
return conn.lookupByName(uuid)
@linux.retry(times=3, sleep_time=1)
def retry_call_libvirt():
if conn is None:
return call_libvirt()
else:
return conn.lookupByName(uuid)
vm = Vm.from_virt_domain(retry_call_libvirt())
logger.debug("find xm xml: %s" % vm.domain_xml)
return vm
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
if exception_if_not_existing:
raise kvmagent.KvmError('unable to find vm[uuid:%s]' % uuid)
else:
return None
err = 'error happened when looking up vm[uuid:%(uuid)s], libvirt error code: %(error_code)s, %(e)s' % locals()
raise libvirt.libvirtError(err)
def get_vm_by_uuid_no_retry(uuid, exception_if_not_existing=True):
try:
# do not retry to fix create vm slow issue 4175
@LibvirtAutoReconnect
def call_libvirt(conn):
return conn.lookupByName(uuid)
vm = Vm.from_virt_domain(call_libvirt())
return vm
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
if exception_if_not_existing:
raise kvmagent.KvmError('unable to find vm[uuid:%s]' % uuid)
else:
return None
err = 'error happened when looking up vm[uuid:%(uuid)s], libvirt error code: %(error_code)s, %(e)s' % locals()
raise libvirt.libvirtError(err)
def get_active_vm_uuids_states():
@LibvirtAutoReconnect
def call_libvirt(conn):
return conn.listDomainsID()
ids = call_libvirt()
uuids_states = {}
uuids_vmInShutdown = []
@LibvirtAutoReconnect
def get_domain(conn):
        # `i` is the enclosing for-loop's control variable,
        # captured here via Python's closure scoping
try:
return conn.lookupByID(i)
except libvirt.libvirtError as ex:
if ex.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return None
raise ex
for i in ids:
domain = get_domain()
if domain == None:
continue
uuid = domain.name()
if uuid.startswith("guestfs-"):
logger.debug("ignore the temp vm generate by guestfish.")
continue
if uuid == "ZStack Management Node VM":
logger.debug("ignore the vm used for MN HA.")
continue
(state, _, _, _, _) = domain.info()
if state == Vm.VIR_DOMAIN_SHUTDOWN:
uuids_vmInShutdown.append(uuid)
state = Vm.power_state[state]
# or use
uuids_states[uuid] = state
return uuids_states, uuids_vmInShutdown
def get_all_vm_states():
return get_active_vm_uuids_states()[0]
def get_all_vm_sync_states():
return get_active_vm_uuids_states()
def get_running_vms():
@LibvirtAutoReconnect
def get_all_ids(conn):
return conn.listDomainsID()
ids = get_all_ids()
vms = []
@LibvirtAutoReconnect
def get_domain(conn):
try:
return conn.lookupByID(i)
except libvirt.libvirtError as ex:
if ex.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return None
raise ex
for i in ids:
domain = get_domain()
if domain == None:
continue
vm = Vm.from_virt_domain(domain)
vms.append(vm)
return vms
def get_cpu_memory_used_by_running_vms():
runnings = get_running_vms()
used_cpu = 0
used_memory = 0
for vm in runnings:
used_cpu += vm.get_cpu_num()
used_memory += vm.get_memory()
return (used_cpu, used_memory)
def cleanup_stale_vnc_iptable_chains():
VncPortIptableRule().delete_stale_chains()
def shared_block_to_file(sbkpath):
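    # e.g. "sharedblock://vg/lv" -> "/dev/vg/lv"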
return sbkpath.replace("sharedblock:/", "/dev")
class VmOperationJudger(object):
def __init__(self, op):
self.op = op
self.expected_events = {}
if self.op == VmPlugin.VM_OP_START:
self.expected_events[LibvirtEventManager.EVENT_STARTED] = LibvirtEventManager.EVENT_STARTED
elif self.op == VmPlugin.VM_OP_MIGRATE:
self.expected_events[LibvirtEventManager.EVENT_STOPPED] = LibvirtEventManager.EVENT_STOPPED
elif self.op == VmPlugin.VM_OP_STOP:
self.expected_events[LibvirtEventManager.EVENT_STOPPED] = LibvirtEventManager.EVENT_STOPPED
elif self.op == VmPlugin.VM_OP_DESTROY:
self.expected_events[LibvirtEventManager.EVENT_STOPPED] = LibvirtEventManager.EVENT_STOPPED
elif self.op == VmPlugin.VM_OP_REBOOT:
self.expected_events[LibvirtEventManager.EVENT_STARTED] = LibvirtEventManager.EVENT_STARTED
self.expected_events[LibvirtEventManager.EVENT_STOPPED] = LibvirtEventManager.EVENT_STOPPED
elif self.op == VmPlugin.VM_OP_SUSPEND:
self.expected_events[LibvirtEventManager.EVENT_SUSPENDED] = LibvirtEventManager.EVENT_SUSPENDED
elif self.op == VmPlugin.VM_OP_RESUME:
self.expected_events[LibvirtEventManager.EVENT_RESUMED] = LibvirtEventManager.EVENT_RESUMED
else:
raise Exception('unknown vm operation[%s]' % self.op)
def remove_expected_event(self, evt):
del self.expected_events[evt]
return len(self.expected_events)
def ignore_libvirt_events(self):
if self.op == VmPlugin.VM_OP_START:
return [LibvirtEventManager.EVENT_STARTED]
elif self.op == VmPlugin.VM_OP_MIGRATE:
return [LibvirtEventManager.EVENT_STOPPED, LibvirtEventManager.EVENT_UNDEFINED]
elif self.op == VmPlugin.VM_OP_STOP:
return [LibvirtEventManager.EVENT_STOPPED, LibvirtEventManager.EVENT_SHUTDOWN]
elif self.op == VmPlugin.VM_OP_DESTROY:
return [LibvirtEventManager.EVENT_STOPPED, LibvirtEventManager.EVENT_SHUTDOWN,
LibvirtEventManager.EVENT_UNDEFINED]
elif self.op == VmPlugin.VM_OP_REBOOT:
return [LibvirtEventManager.EVENT_STARTED, LibvirtEventManager.EVENT_STOPPED]
else:
raise Exception('unknown vm operation[%s]' % self.op)
def make_spool_conf(imgfmt, dev_letter, volume):
d = tempfile.gettempdir()
fname = "{0}_{1}".format(os.path.basename(volume.installPath), dev_letter)
fpath = os.path.join(d, fname) + ".conf"
vsize, _ = linux.qcow2_size_and_actual_size(volume.installPath)
with open(fpath, "w") as fd:
fd.write("device_type 0\n")
fd.write("local_storage_type 0\n")
fd.write("device_owner blockpmd\n")
fd.write("device_format {0}\n".format(imgfmt))
fd.write("cluster_id 1000\n")
fd.write("device_id {0}\n".format(ord(dev_letter)))
fd.write("device_uuid {0}\n".format(fname))
fd.write("mount_point {0}\n".format(volume.installPath))
fd.write("device_size {0}\n".format(vsize))
os.chmod(fpath, 0o600)
return fpath
def is_spice_tls():
return bash.bash_r("grep '^[[:space:]]*spice_tls[[:space:]]*=[[:space:]]*1' /etc/libvirt/qemu.conf")
def get_dom_error(uuid):
try:
domblkerror = shell.call('virsh domblkerror %s' % uuid)
except:
return None
if 'No errors found' in domblkerror:
return None
return domblkerror.replace('\n', '')
class Vm(object):
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
VM_STATE_NO_STATE = 'NoState'
VM_STATE_RUNNING = 'Running'
VM_STATE_PAUSED = 'Paused'
VM_STATE_SHUTDOWN = 'Shutdown'
VM_STATE_CRASHED = 'Crashed'
VM_STATE_SUSPENDED = 'Suspended'
ALLOW_SNAPSHOT_STATE = (VM_STATE_RUNNING, VM_STATE_PAUSED, VM_STATE_SHUTDOWN)
power_state = {
VIR_DOMAIN_NOSTATE: VM_STATE_NO_STATE,
VIR_DOMAIN_RUNNING: VM_STATE_RUNNING,
VIR_DOMAIN_BLOCKED: VM_STATE_RUNNING,
VIR_DOMAIN_PAUSED: VM_STATE_PAUSED,
VIR_DOMAIN_SHUTDOWN: VM_STATE_SHUTDOWN,
VIR_DOMAIN_SHUTOFF: VM_STATE_SHUTDOWN,
VIR_DOMAIN_CRASHED: VM_STATE_CRASHED,
VIR_DOMAIN_PMSUSPENDED: VM_STATE_SUSPENDED,
}
    # IDE and SATA are not supported on aarch64/i440fx,
    # so cdroms and volumes need to share sd[a-z]
    #
    # IDE is supported on x86_64/i440fx,
    # so cdroms use hd[c-e] while
    # virtio and virtioSCSI volumes share (sd[a-z] - sdc)
device_letter_config = {
'aarch64': 'abfghijklmnopqrstuvwxyz',
'mips64el': 'abfghijklmnopqrstuvwxyz',
'x86_64': 'abdefghijklmnopqrstuvwxyz'
}
DEVICE_LETTERS = device_letter_config[HOST_ARCH]
ISO_DEVICE_LETTERS = 'cde'
timeout_detached_vol = set()
@staticmethod
def get_device_unit(device_id):
# type: (int) -> int
if device_id >= len(Vm.DEVICE_LETTERS):
err = "exceeds max disk limit, device id[%s], but only 0 ~ %d are allowed" % (device_id, len(Vm.DEVICE_LETTERS) - 1)
logger.warn(err)
raise kvmagent.KvmError(err)
# e.g. sda -> unit 0 sdf -> unit 5, same as libvirt
return ord(Vm.DEVICE_LETTERS[device_id]) - ord(Vm.DEVICE_LETTERS[0])
@staticmethod
def get_iso_device_unit(device_id):
if device_id >= len(Vm.ISO_DEVICE_LETTERS):
err = "exceeds max iso limit, device id[%s], but only 0 ~ %d are allowed" % (device_id, len(Vm.ISO_DEVICE_LETTERS) - 1)
logger.warn(err)
raise kvmagent.KvmError(err)
return str(ord(Vm.ISO_DEVICE_LETTERS[device_id]) - ord(Vm.DEVICE_LETTERS[0]))
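    # Illustrative mapping on x86_64 (DEVICE_LETTERS = 'abdefg...'):
    #   get_device_unit(0) -> 0 ('a'); get_device_unit(2) -> 3 ('d', skipping the IDE cdrom letter 'c')
    #   get_iso_device_unit(0) -> '2' (ISO letter 'c' relative to disk letter 'a')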
timeout_object = linux.TimeoutObject()
@staticmethod
def set_device_address(disk_element, vol, vm_to_attach=None):
# type: (etree.Element, jsonobject.JsonObject, Vm) -> None
target = disk_element.find('target')
bus = target.get('bus') if target is not None else None
if bus == 'scsi':
occupied_units = vm_to_attach.get_occupied_disk_address_units(bus='scsi', controller=0) if vm_to_attach else []
default_unit = Vm.get_device_unit(vol.deviceId)
unit = default_unit if default_unit not in occupied_units else max(occupied_units) + 1
e(disk_element, 'address', None, {'type': 'drive', 'controller': '0', 'unit': str(unit)})
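        # e.g. with scsi units [0, 1] already occupied and a default unit of 1, the disk lands on unit 2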
def __init__(self):
self.uuid = None
self.domain_xmlobject = None
self.domain_xml = None
self.domain = None
self.state = None
def wait_for_state_change(self, state):
try:
self.refresh()
except Exception as e:
if not state:
return True
raise e
if isinstance(state, list):
return self.state in state
else:
return self.state == state
def get_occupied_disk_address_units(self, bus, controller):
# type: (str, int) -> list[int]
result = []
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if not xmlobject.has_element(disk, 'address') or not xmlobject.has_element(disk, 'target'):
continue
if not disk.target.bus__ or not disk.target.bus_ == bus:
continue
if not disk.address.controller__ or not str(disk.address.controller_) == str(controller):
continue
result.append(int(disk.address.unit_))
return result
def get_cpu_num(self):
cpuNum = self.domain_xmlobject.vcpu.current__
if cpuNum:
return int(cpuNum)
else:
return int(self.domain_xmlobject.vcpu.text_)
def get_cpu_speed(self):
cputune = self.domain_xmlobject.get_child_node('cputune')
if cputune:
return int(cputune.shares.text_) / self.get_cpu_num()
else:
# TODO: return system cpu capacity
return 512
def get_memory(self):
return long(self.domain_xmlobject.currentMemory.text_) * 1024
def get_name(self):
return self.domain_xmlobject.description.text_
def refresh(self):
(state, _, _, _, _) = self.domain.info()
self.state = self.power_state[state]
self.domain_xml = self.domain.XMLDesc(0)
self.domain_xmlobject = xmlobject.loads(self.domain_xml)
self.uuid = self.domain_xmlobject.name.text_
def is_alive(self):
try:
self.domain.info()
return True
except:
return False
def _wait_for_vm_running(self, timeout=60, wait_console=True):
if not linux.wait_callback_success(self.wait_for_state_change, [self.VM_STATE_RUNNING, self.VM_STATE_PAUSED], interval=0.5,
timeout=timeout):
raise kvmagent.KvmError('unable to start vm[uuid:%s, name:%s], vm state is not changing to '
'running/paused after %s seconds' % (self.uuid, self.get_name(), timeout))
if not wait_console:
return
vnc_port = self.get_console_port()
def wait_vnc_port_open(_):
cmd = shell.ShellCmd('netstat -na | grep ":%s" > /dev/null' % vnc_port)
cmd(is_exception=False)
return cmd.return_code == 0
if not linux.wait_callback_success(wait_vnc_port_open, None, interval=0.5, timeout=30):
raise kvmagent.KvmError("unable to start vm[uuid:%s, name:%s]; its vnc port does"
" not open after 30 seconds" % (self.uuid, self.get_name()))
def _wait_for_vm_paused(self, timeout=60):
if not linux.wait_callback_success(self.wait_for_state_change, self.VM_STATE_PAUSED, interval=0.5,
timeout=timeout):
raise kvmagent.KvmError('unable to start vm[uuid:%s, name:%s], vm state is not changing to '
'paused after %s seconds' % (self.uuid, self.get_name(), timeout))
def reboot(self, cmd):
self.stop(timeout=cmd.timeout)
# set boot order
boot_dev = []
for bdev in cmd.bootDev:
xo = xmlobject.XmlObject('boot')
xo.put_attr('dev', bdev)
boot_dev.append(xo)
self.domain_xmlobject.os.replace_node('boot', boot_dev)
self.domain_xml = self.domain_xmlobject.dump()
self.start(cmd.timeout)
def restore(self, path):
@LibvirtAutoReconnect
def restore_from_file(conn):
return conn.restoreFlags(path, self.domain_xml)
restore_from_file()
def start(self, timeout=60, create_paused=False, wait_console=True):
# TODO: 1. enable hair_pin mode
logger.debug('creating vm:\n%s' % self.domain_xml)
@LibvirtAutoReconnect
def define_xml(conn):
return conn.defineXML(self.domain_xml)
flag = (0, libvirt.VIR_DOMAIN_START_PAUSED)[create_paused]
domain = define_xml()
self.domain = domain
self.domain.createWithFlags(flag)
if create_paused:
self._wait_for_vm_paused(timeout)
else:
self._wait_for_vm_running(timeout, wait_console)
def stop(self, strategy='grace', timeout=5, undefine=True):
def cleanup_addons():
for chan in self.domain_xmlobject.devices.get_child_node_as_list('channel'):
if chan.type_ == 'unix':
path = chan.source.path_
linux.rm_file_force(path)
def loop_shutdown(_):
try:
self.domain.shutdown()
except:
# domain has been shut down
pass
try:
return self.wait_for_state_change(self.VM_STATE_SHUTDOWN)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
return True
else:
raise
def iscsi_cleanup():
disks = self.domain_xmlobject.devices.get_child_node_as_list('disk')
for disk in disks:
if disk.type_ == 'block' and disk.device_ == 'lun':
BlkIscsi.logout_portal(disk.source.dev_)
def loop_undefine(_):
if not undefine:
return True
if not self.is_alive():
return True
def force_undefine():
try:
self.domain.undefine()
except:
logger.warn('cannot undefine the VM[uuid:%s]' % self.uuid)
pid = linux.find_process_by_cmdline(['qemu', self.uuid])
if pid:
# force to kill the VM
linux.kill_process(pid, is_exception=False)
try:
flags = 0
for attr in [ "VIR_DOMAIN_UNDEFINE_MANAGED_SAVE", "VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA", "VIR_DOMAIN_UNDEFINE_NVRAM" ]:
if hasattr(libvirt, attr):
flags |= getattr(libvirt, attr)
self.domain.undefineFlags(flags)
except libvirt.libvirtError as ex:
logger.warn('undefine domain[%s] failed: %s' % (self.uuid, str(ex)))
force_undefine()
return self.wait_for_state_change(None)
def loop_destroy(_):
try:
self.domain.destroy()
except:
# domain has been destroyed
pass
try:
return self.wait_for_state_change(self.VM_STATE_SHUTDOWN)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
return True
else:
raise
        do_destroy = strategy in ('grace', 'cold')
        isPersistent = self.domain.isPersistent()
if strategy == 'grace':
if linux.wait_callback_success(loop_shutdown, None, timeout=60):
do_destroy = False
iscsi_cleanup()
if do_destroy:
if not linux.wait_callback_success(loop_destroy, None, timeout=60):
logger.warn('failed to destroy vm, timeout after 60 secs')
raise kvmagent.KvmError('failed to stop vm, timeout after 60 secs')
cleanup_addons()
if strategy == 'force':
pid = linux.find_process_by_cmdline(['qemu', self.uuid])
if pid:
# force to kill the VM
try:
linux.kill_process(int(pid), 60, True, False)
except Exception as e:
logger.warn('failed to kill vm, timeout after 60 secs')
raise kvmagent.KvmError('failed to kill vm, timeout after 60 secs')
return
# undefine domain only if it is persistent
if not isPersistent:
return
if not linux.wait_callback_success(loop_undefine, None, timeout=60):
logger.warn('failed to undefine vm, timeout after 60 secs')
raise kvmagent.KvmError('failed to stop vm, timeout after 60 secs')
def destroy(self):
self.stop(strategy='cold')
def pause(self, timeout=5):
def loop_suspend(_):
try:
self.domain.suspend()
except:
pass
try:
return self.wait_for_state_change(self.VM_STATE_PAUSED)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
return True
else:
raise
if not linux.wait_callback_success(loop_suspend, None, timeout=10):
raise kvmagent.KvmError('failed to suspend vm, timeout after 10 secs')
def resume(self, timeout=5):
def loop_resume(_):
try:
self.domain.resume()
except:
pass
try:
return self.wait_for_state_change(self.VM_STATE_RUNNING)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
return True
else:
raise
if not linux.wait_callback_success(loop_resume, None, timeout=60):
domblkerror = get_dom_error(self.uuid)
if domblkerror is None:
raise kvmagent.KvmError('failed to resume vm, timeout after 60 secs')
else:
raise kvmagent.KvmError('failed to resume vm, because %s' % domblkerror)
def harden_console(self, mgmt_ip):
if is_namespace_used():
id_node = find_zstack_metadata_node(etree.fromstring(self.domain_xml), 'internalId')
id = id_node.text
else:
id = self.domain_xmlobject.metadata.internalId.text_
vir = VncPortIptableRule()
vir.vm_internal_id = id
vir.delete()
vir.host_ip = mgmt_ip
vir.port = self.get_console_port()
vir.apply()
def get_vdi_connect_port(self):
rsp = GetVncPortResponse()
for g in self.domain_xmlobject.devices.get_child_node_as_list('graphics'):
if g.type_ == 'vnc':
rsp.vncPort = g.port_
rsp.protocol = "vnc"
elif g.type_ == 'spice':
rsp.spicePort = g.port_
if g.hasattr('tlsPort_'):
rsp.spiceTlsPort = g.tlsPort_
rsp.protocol = "spice"
if rsp.vncPort is not None and rsp.spicePort is not None:
rsp.protocol = "vncAndSpice"
return rsp.protocol, rsp.vncPort, rsp.spicePort, rsp.spiceTlsPort
def get_console_port(self):
for g in self.domain_xmlobject.devices.get_child_node_as_list('graphics'):
if g.type_ == 'vnc' or g.type_ == 'spice':
return g.port_
def get_console_protocol(self):
for g in self.domain_xmlobject.devices.get_child_node_as_list('graphics'):
if g.type_ == 'vnc' or g.type_ == 'spice':
return g.type_
raise kvmagent.KvmError('no vnc console defined for vm[uuid:%s]' % self.uuid)
def attach_data_volume(self, volume, addons):
self._wait_vm_run_until_seconds(10)
self.timeout_object.wait_until_object_timeout('detach-volume-%s' % self.uuid)
self._attach_data_volume(volume, addons)
self.timeout_object.put('attach-volume-%s' % self.uuid, timeout=10)
@staticmethod
def set_volume_qos(addons, volumeUuid, volume_xml_obj):
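"""Translate the VolumeQos/VolumeReadQos/VolumeWriteQos addons for this
volume into an <iotune> element (total_/read_/write_ *_bytes_sec and
*_iops_sec limits) under the disk XML."""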
if not addons:
return
for key in ["VolumeQos", "VolumeReadQos", "VolumeWriteQos"]:
vol_qos = addons[key]
if not vol_qos:
continue
qos = vol_qos[volumeUuid]
if not qos:
continue
if not qos.totalBandwidth and not qos.totalIops:
continue
mode = None
if key == 'VolumeQos':
mode = "total"
elif key == 'VolumeReadQos':
mode = "read"
elif key == 'VolumeWriteQos':
mode = "write"
iotune = e(volume_xml_obj, 'iotune')
if qos.totalBandwidth:
virsh_key = "%s_bytes_sec" % mode
e(iotune, virsh_key, str(qos.totalBandwidth))
if qos.totalIops:
virsh_key = "%s_iops_sec" % mode
e(iotune, virsh_key, str(qos.totalIops))
@staticmethod
def set_volume_serial_id(vol_uuid, volume_xml_obj):
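# do not add <serial> to scsi passthrough LUNs (type='block', device='lun');
# every other disk gets the volume uuid as its serial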
if volume_xml_obj.get('type') != 'block' or volume_xml_obj.get('device') != 'lun':
e(volume_xml_obj, 'serial', vol_uuid)
def _attach_data_volume(self, volume, addons):
if volume.deviceId >= len(self.DEVICE_LETTERS):
err = "vm[uuid:%s] exceeds max disk limit, device id[%s], but only 0 ~ %d are allowed" % (self.uuid, volume.deviceId, len(self.DEVICE_LETTERS) - 1)
logger.warn(err)
raise kvmagent.KvmError(err)
def volume_native_aio(volume_xml_obj):
if not addons:
return
vol_aio = addons['NativeAio']
if not vol_aio:
return
drivers = volume_xml_obj.getiterator("driver")
if drivers is None or len(drivers) == 0:
return
drivers[0].set("io", "native")
def filebased_volume():
disk = etree.Element('disk', attrib={'type': 'file', 'device': 'disk'})
e(disk, 'driver', None, {'name': 'qemu', 'type': linux.get_img_fmt(volume.installPath), 'cache': volume.cacheMode})
e(disk, 'source', None, {'file': volume.installPath})
if volume.shareable:
e(disk, 'shareable')
if volume.useVirtioSCSI:
e(disk, 'target', None, {'dev': 'sd%s' % dev_letter, 'bus': 'scsi'})
e(disk, 'wwn', volume.wwn)
elif volume.useVirtio:
e(disk, 'target', None, {'dev': 'vd%s' % self.DEVICE_LETTERS[volume.deviceId], 'bus': 'virtio'})
else:
bus_type = self._get_controller_type()
dev_format = Vm._get_disk_target_dev_format(bus_type)
e(disk, 'target', None, {'dev': dev_format % dev_letter, 'bus': bus_type})
return disk
def scsilun_volume():
# default value of sgio is 'filtered'
# NOTE(weiw): scsi lun does not support aio or qos
disk = etree.Element('disk', attrib={'type': 'block', 'device': 'lun', 'sgio': get_sgio_value()})
e(disk, 'driver', None, {'name': 'qemu', 'type': 'raw'})
e(disk, 'source', None, {'dev': volume.installPath})
e(disk, 'target', None, {'dev': 'sd%s' % dev_letter, 'bus': 'scsi'})
return disk
def iscsibased_volume():
# type: () -> etree.Element
def virtio_iscsi():
vi = VirtioIscsi()
portal, vi.target, vi.lun = volume.installPath[len('iscsi://'):].split('/')  # lstrip('iscsi://') would strip a char set and could eat the hostname
vi.server_hostname, vi.server_port = portal.split(':')
vi.device_letter = dev_letter
vi.volume_uuid = volume.volumeUuid
vi.chap_username = volume.chapUsername
vi.chap_password = volume.chapPassword
return vi.to_xmlobject()
def blk_iscsi():
bi = BlkIscsi()
portal, bi.target, bi.lun = volume.installPath[len('iscsi://'):].split('/')  # lstrip('iscsi://') would strip a char set and could eat the hostname
bi.server_hostname, bi.server_port = portal.split(':')
bi.device_letter = dev_letter
bi.volume_uuid = volume.volumeUuid
bi.chap_username = volume.chapUsername
bi.chap_password = volume.chapPassword
return bi.to_xmlobject()
if volume.useVirtio:
return virtio_iscsi()
else:
return blk_iscsi()
def ceph_volume():
# type: () -> etree.Element
def virtio_ceph():
vc = VirtioCeph()
vc.volume = volume
vc.dev_letter = dev_letter
return vc.to_xmlobject()
def blk_ceph():
ic = BlkCeph()
ic.volume = volume
ic.dev_letter = dev_letter
ic.bus_type = self._get_controller_type()
return ic.to_xmlobject()
def virtio_scsi_ceph():
vsc = VirtioSCSICeph()
vsc.volume = volume
vsc.dev_letter = dev_letter
return vsc.to_xmlobject()
if volume.useVirtioSCSI:
return virtio_scsi_ceph()
else:
if volume.useVirtio:
return virtio_ceph()
else:
return blk_ceph()
def block_volume():
# type: () -> etree.Element
def blk():
disk = etree.Element('disk', {'type': 'block', 'device': 'disk', 'snapshot': 'external'})
e(disk, 'driver', None,
{'name': 'qemu', 'type': 'raw', 'cache': 'none', 'io': 'native'})
e(disk, 'source', None, {'dev': volume.installPath})
if volume.useVirtioSCSI:
e(disk, 'target', None, {'dev': 'sd%s' % dev_letter, 'bus': 'scsi'})
e(disk, 'wwn', volume.wwn)
else:
e(disk, 'target', None, {'dev': 'vd%s' % dev_letter, 'bus': 'virtio'})
return disk
return blk()
def spool_volume():
# type: () -> etree.Element
def blk():
imgfmt = linux.get_img_fmt(volume.installPath)
disk = etree.Element('disk', {'type': 'network', 'device': 'disk'})
e(disk, 'driver', None,
{'name': 'qemu', 'type': 'raw', 'cache': 'none', 'io': 'native'})
e(disk, 'source', None,
{'protocol': 'spool', 'name': make_spool_conf(imgfmt, dev_letter, volume)})
e(disk, 'target', None, {'dev': 'vd%s' % dev_letter, 'bus': 'virtio'})
return disk
return blk()
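# build the <disk> element for the volume's deviceType, then apply address, QoS, serial and native aio addons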
dev_letter = self._get_device_letter(volume, addons)
if volume.deviceType == 'iscsi':
disk_element = iscsibased_volume()
elif volume.deviceType == 'file':
disk_element = filebased_volume()
elif volume.deviceType == 'ceph':
disk_element = ceph_volume()
elif volume.deviceType == 'scsilun':
disk_element = scsilun_volume()
elif volume.deviceType == 'block':
disk_element = block_volume()
elif volume.deviceType == 'spool':
disk_element = spool_volume()
else:
raise Exception('unsupported volume deviceType[%s]' % volume.deviceType)
Vm.set_device_address(disk_element, volume, get_vm_by_uuid(self.uuid))
Vm.set_volume_qos(addons, volume.volumeUuid, disk_element)
Vm.set_volume_serial_id(volume.volumeUuid, disk_element)
volume_native_aio(disk_element)
xml = etree.tostring(disk_element)
logger.debug('attaching volume[%s] to vm[uuid:%s]:\n%s' % (volume.installPath, self.uuid, xml))
try:
# libvirt has a bug that attaching a volume right after the vm is created is likely to fail, so we retry three times here
@linux.retry(times=3, sleep_time=5)
def attach():
def wait_for_attach(_):
me = get_vm_by_uuid(self.uuid)
disk, _ = me._get_target_disk(volume, is_exception=False)
if not disk:
logger.debug('volume[%s] is still in the process of attaching, waiting for it' % volume.installPath)
return bool(disk)
try:
self.domain.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
if not linux.wait_callback_success(wait_for_attach, None, 5, 1):
raise Exception("cannot attach a volume[uuid: %s] to the vm[uuid: %s];"
"it's still not attached after 5 seconds" % (volume.volumeUuid, self.uuid))
except:
# check one more time
if not wait_for_attach(None):
raise
attach()
except libvirt.libvirtError as ex:
err = str(ex)
if 'Duplicate ID' in err:
err = ('unable to attach the volume[%s] to vm[uuid: %s], %s. This is a KVM issue, please reboot'
' the VM and try again' % (volume.volumeUuid, self.uuid, err))
elif 'No more available PCI slots' in err:
err = ('vm[uuid: %s] has no more PCI slots for volume[%s]. This is a Libvirt issue, please reboot'
' the VM and try again' % (self.uuid, volume.volumeUuid))
else:
err = 'unable to attach the volume[%s] to vm[uuid: %s], %s.' % (volume.volumeUuid, self.uuid, err)
logger.warn(linux.get_exception_stacktrace())
raise kvmagent.KvmError(err)
def _get_device_letter(self, volume, addons):
default_letter = Vm.DEVICE_LETTERS[volume.deviceId]
if not volume.useVirtioSCSI:
return default_letter
# usually device_letter_index equals device_id, but they are reversed when the volume uses VirtioSCSI because of ZSTAC-9641;
# so when attaching a SCSI volume again after detaching it, device_letter should be the same as the original name,
# otherwise attaching fails with a duplicate device name.
def get_reversed_disks():
results = {}
for vol in addons.attachedDataVolumes:
_, disk_name = self._get_target_disk(vol)
if disk_name and disk_name[-1] != Vm.DEVICE_LETTERS[vol.deviceId]:
results[disk_name[-1]] = vol.deviceId
return results
# {actual_dev_letter: device_id_in_db}
# type: dict[str, int]
reversed_disks = get_reversed_disks()
if default_letter not in reversed_disks.keys():
return default_letter
else:
# letter has been occupied, so return reversed letter
logger.debug("reversed disk name: %s" % reversed_disks)
return Vm.DEVICE_LETTERS[reversed_disks[default_letter]]
def detach_data_volume(self, volume):
self._wait_vm_run_until_seconds(10)
self.timeout_object.wait_until_object_timeout('attach-volume-%s' % self.uuid)
self._detach_data_volume(volume)
self.timeout_object.put('detach-volume-%s' % self.uuid, timeout=10)
def _detach_data_volume(self, volume):
assert volume.deviceId != 0, 'how can the root volume get detached???'
target_disk, disk_name = self._get_target_disk(volume, is_exception=False)
if not target_disk:
if self._volume_detach_timed_out(volume):
logger.debug('volume [installPath: %s] has been detached before' % volume.installPath)
self._clean_timeout_record(volume)
return
raise kvmagent.KvmError('unable to find data volume[%s] on vm[uuid:%s]' % (disk_name, self.uuid))
xmlstr = target_disk.dump()
logger.debug('detaching volume from vm[uuid:%s]:\n%s' % (self.uuid, xmlstr))
try:
# libvirt has a bug that detaching a volume right after the vm is created is likely to fail, so we retry three times here
@linux.retry(times=3, sleep_time=5)
def detach():
def wait_for_detach(_):
me = get_vm_by_uuid(self.uuid)
disk, _ = me._get_target_disk(volume, is_exception=False)
if disk:
logger.debug('volume[%s] is still in process of detaching, wait for it' % volume.installPath)
return not bool(disk)
try:
self.domain.detachDeviceFlags(xmlstr, libvirt.VIR_DOMAIN_AFFECT_LIVE)
if not linux.wait_callback_success(wait_for_detach, None, 5, 1):
raise Exception("unable to detach the volume[uuid:%s] from the vm[uuid:%s];"
"it's still attached after 5 seconds" %
(volume.volumeUuid, self.uuid))
except:
# check one more time
if not wait_for_detach(None):
self._record_volume_detach_timeout(volume)
logger.debug("detach timeout, record volume install path: %s" % volume.installPath)
raise
detach()
if self._volume_detach_timed_out(volume):
self._clean_timeout_record(volume)
logger.debug("detach success finally, remove record of volume install path: %s" % volume.installPath)
def logout_iscsi():
BlkIscsi.logout_portal(target_disk.source.dev_)
if volume.deviceType == 'iscsi':
if not volume.useVirtio:
logout_iscsi()
except libvirt.libvirtError as ex:
vm = get_vm_by_uuid(self.uuid)
logger.warn('vm dump: %s' % vm.domain_xml)
logger.warn(linux.get_exception_stacktrace())
raise kvmagent.KvmError(
'unable to detach volume[%s] from vm[uuid:%s], %s' % (volume.installPath, self.uuid, str(ex)))
def _record_volume_detach_timeout(self, volume):
Vm.timeout_detached_vol.add(volume.installPath + "-" + self.uuid)
def _volume_detach_timed_out(self, volume):
return volume.installPath + "-" + self.uuid in Vm.timeout_detached_vol
def _clean_timeout_record(self, volume):
Vm.timeout_detached_vol.remove(volume.installPath + "-" + self.uuid)
def _get_back_file(self, volume):
back = linux.qcow2_get_backing_file(volume)
return None if not back else back
def _get_backfile_chain(self, current):
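"""Return the qcow2 backing file chain of `current`, ordered from its
immediate backing file down to the base image."""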
back_files = []
def get_back_files(volume):
back_file = self._get_back_file(volume)
if not back_file:
return
back_files.append(back_file)
get_back_files(back_file)
get_back_files(current)
return back_files
@staticmethod
def ensure_no_internal_snapshot(volume):
if os.path.exists(volume) and shell.run("%s --backing-chain %s | grep 'Snapshot list:'"
% (qemu_img.subcmd('info'), volume)) == 0:
raise kvmagent.KvmError('found internal snapshot in the backing chain of volume[path:%s].' % volume)
# NOTE: code from OpenStack Nova
def _wait_for_block_job(self, disk_path, abort_on_error=False,
wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:returns: True if still in progress
False if completed
"""
status = self.domain.blockJobInfo(disk_path, 0)
if status == -1 and abort_on_error:
raise kvmagent.KvmError('libvirt error while requesting blockjob info.')
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception as e:
logger.warn(linux.get_exception_stacktrace())
return False
if wait_for_job_clean:
job_ended = not status
else:
job_ended = cur == end
return not job_ended
def _get_target_disk_by_path(self, installPath, is_exception=True):
if installPath.startswith('sharedblock'):
installPath = shared_block_to_file(installPath)
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if not xmlobject.has_element(disk, 'source'):
continue
# file
if disk.source.file__ and disk.source.file_ == installPath:
return disk, disk.target.dev_
# ceph
if disk.source.name__ and disk.source.name_ in installPath:
return disk, disk.target.dev_
# 'block':
if disk.source.dev__ and disk.source.dev_ in installPath:
return disk, disk.target.dev_
if not is_exception:
return None, None
logger.debug('%s is not found on the vm[uuid:%s]' % (installPath, self.uuid))
raise kvmagent.KvmError('unable to find volume[installPath:%s] on vm[uuid:%s]' % (installPath, self.uuid))
def _get_all_volume_alias_names(self, volumes):
volumes.sort(key=lambda d: d.deviceId)
target_disk_alias_names = []
for volume in volumes:
target_disk, _ = self._get_target_disk(volume)
target_disk_alias_names.append(target_disk.alias.name_)
if len(volumes) != len(target_disk_alias_names):
raise Exception('not all disks have alias names, skip rollback')
return target_disk_alias_names
def _get_target_disk(self, volume, is_exception=True):
if volume.installPath.startswith('sharedblock'):
volume.installPath = shared_block_to_file(volume.installPath)
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if not xmlobject.has_element(disk, 'source') and not volume.deviceType == 'quorum':
continue
if volume.deviceType == 'iscsi':
if volume.useVirtio:
if disk.source.name__ and disk.source.name_ in volume.installPath:
return disk, disk.target.dev_
else:
if disk.source.dev__ and volume.volumeUuid in disk.source.dev_:
return disk, disk.target.dev_
elif volume.deviceType == 'file':
if disk.source.file__ and disk.source.file_ == volume.installPath:
return disk, disk.target.dev_
elif volume.deviceType == 'ceph':
if disk.source.name__ and disk.source.name_ in volume.installPath:
return disk, disk.target.dev_
elif volume.deviceType == 'scsilun':
if disk.source.dev__ and volume.installPath in disk.source.dev_:
return disk, disk.target.dev_
elif volume.deviceType == 'block':
if disk.source.dev__ and disk.source.dev_ in volume.installPath:
return disk, disk.target.dev_
elif volume.deviceType == 'quorum':
logger.debug("quorum file path is %s" % disk.backingStore.source.file_)
if disk.backingStore.source.file_ and disk.backingStore.source.file_ in volume.installPath:
disk.driver.type_ = "qcow2"
disk.source = disk.backingStore.source
return disk, disk.backingStore.source.file_
if not is_exception:
return None, None
logger.debug('%s is not found on the vm[uuid:%s], xml: %s' % (volume.installPath, self.uuid, self.domain_xml))
raise kvmagent.KvmError('unable to find volume[installPath:%s] on vm[uuid:%s]' % (volume.installPath, self.uuid))
def _is_ft_vm(self):
return any(disk.type_ == "quorum" for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'))
def resize_volume(self, volume, size):
device_id = volume.deviceId
target_disk, disk_name = self._get_target_disk(volume)
alias_name = target_disk.alias.name_
r, o, e = bash.bash_roe("virsh qemu-monitor-command %s block_resize drive-%s %sB --hmp"
% (self.uuid, alias_name, size))
logger.debug("resize volume[%s] of vm[%s]" % (alias_name, self.uuid))
if r != 0:
raise kvmagent.KvmError(
'unable to resize volume[id:{0}] of vm[uuid:{1}] because {2}'.format(device_id, self.uuid, e))
def take_live_volumes_delta_snapshots(self, vs_structs):
"""
:type vs_structs: list[VolumeSnapshotJobStruct]
:rtype: list[VolumeSnapshotResultStruct]
"""
disk_names = []
return_structs = []
memory_snapshot_struct = None
snapshot = etree.Element('domainsnapshot')
disks = e(snapshot, 'disks')
logger.debug(snapshot)
if len(vs_structs) == 0:
return return_structs
def get_size(install_path):
"""
:rtype: long
"""
return VmPlugin._get_snapshot_size(install_path)
logger.debug(vs_structs)
need_memory_snapshot = False
for vs_struct in vs_structs:
if vs_struct.live is False or vs_struct.full is True:
raise kvmagent.KvmError("volume %s is not live or full snapshot specified, "
"can not proceed")
if vs_struct.memory:
e(snapshot, 'memory', None, attrib={'snapshot': 'external', 'file': vs_struct.installPath})
need_memory_snapshot = True
snapshot_dir = os.path.dirname(vs_struct.installPath)
if not os.path.exists(snapshot_dir):
os.makedirs(snapshot_dir)
memory_snapshot_struct = vs_struct
continue
target_disk, disk_name = self._get_target_disk(vs_struct.volume)
if target_disk is None:
logger.debug("can not find %s" % vs_struct.volume.deviceId)
continue
snapshot_dir = os.path.dirname(vs_struct.installPath)
if not os.path.exists(snapshot_dir):
os.makedirs(snapshot_dir)
disk_names.append(disk_name)
d = e(disks, 'disk', None, attrib={'name': disk_name, 'snapshot': 'external', 'type': 'file'})
e(d, 'source', None, attrib={'file': vs_struct.installPath})
e(d, 'driver', None, attrib={'type': 'qcow2'})
return_structs.append(VolumeSnapshotResultStruct(
vs_struct.volumeUuid,
target_disk.source.file_,
vs_struct.installPath,
get_size(target_disk.source.file_)))
self.refresh()
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if disk.target.dev_ not in disk_names:
e(disks, 'disk', None, attrib={'name': disk.target.dev_, 'snapshot': 'no'})
xml = etree.tostring(snapshot)
logger.debug('creating live snapshot for vm[uuid:{0}] volumes[id:{1}]:\n{2}'.format(self.uuid, disk_names, xml))
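# NO_METADATA keeps libvirt from tracking the snapshot, ATOMIC makes all disks succeed or none;
# DISK_ONLY is added below only when no memory snapshot is requested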
snap_flags = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC
if not need_memory_snapshot:
snap_flags |= libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
try:
self.domain.snapshotCreateXML(xml, snap_flags)
if memory_snapshot_struct:
return_structs.append(VolumeSnapshotResultStruct(
memory_snapshot_struct.volumeUuid,
memory_snapshot_struct.installPath,
memory_snapshot_struct.installPath,
get_size(memory_snapshot_struct.installPath)))
return return_structs
except libvirt.libvirtError as ex:
logger.warn(linux.get_exception_stacktrace())
raise kvmagent.KvmError(
'unable to take live snapshot of vm[uuid:{0}] volumes[id:{1}], {2}'.format(self.uuid, disk_names, str(ex)))
def take_volume_snapshot(self, volume, install_path, full_snapshot=False):
device_id = volume.deviceId
target_disk, disk_name = self._get_target_disk(volume)
snapshot_dir = os.path.dirname(install_path)
if not os.path.exists(snapshot_dir):
os.makedirs(snapshot_dir)
previous_install_path = target_disk.source.file_
back_file_len = len(self._get_backfile_chain(previous_install_path))
# for RHEL, base image's back_file_len == 1; for ubuntu back_file_len == 0
first_snapshot = full_snapshot and (back_file_len == 1 or back_file_len == 0)
def take_delta_snapshot():
snapshot = etree.Element('domainsnapshot')
disks = e(snapshot, 'disks')
d = e(disks, 'disk', None, attrib={'name': disk_name, 'snapshot': 'external', 'type': 'file'})
e(d, 'source', None, attrib={'file': install_path})
e(d, 'driver', None, attrib={'type': 'qcow2'})
# QEMU 2.3 by default creates snapshots on all devices,
# but we only need one
self.refresh()
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if disk.target.dev_ != disk_name:
e(disks, 'disk', None, attrib={'name': disk.target.dev_, 'snapshot': 'no'})
xml = etree.tostring(snapshot)
logger.debug('creating snapshot for vm[uuid:{0}] volume[id:{1}]:\n{2}'.format(self.uuid, device_id, xml))
snap_flags = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
try:
self.domain.snapshotCreateXML(xml, snap_flags)
return previous_install_path, install_path
except libvirt.libvirtError as ex:
logger.warn(linux.get_exception_stacktrace())
raise kvmagent.KvmError(
'unable to take snapshot of vm[uuid:{0}] volume[id:{1}], {2}'.format(self.uuid, device_id, str(ex)))
def take_full_snapshot():
self.block_stream_disk(volume)
return take_delta_snapshot()
if first_snapshot:
# the first snapshot is always full snapshot
# at this moment, delta snapshot returns the original volume as full snapshot
return take_delta_snapshot()
if full_snapshot:
return take_full_snapshot()
else:
return take_delta_snapshot()
def block_stream_disk(self, volume):
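# blockRebase() with base=None performs a block pull: backing chain data is
# streamed into the active image until the volume no longer needs a backing file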
target_disk, disk_name = self._get_target_disk(volume)
install_path = target_disk.source.file_
logger.debug('start block stream for disk %s' % disk_name)
self.domain.blockRebase(disk_name, None, 0, 0)
logger.debug('block stream for disk %s in processing' % disk_name)
def wait_job(_):
logger.debug('block stream is waiting for %s blockRebase job completion' % disk_name)
return not self._wait_for_block_job(disk_name, abort_on_error=True)
if not linux.wait_callback_success(wait_job, timeout=21600, ignore_exception_in_callback=True):
raise kvmagent.KvmError('block stream failed')
def wait_backing_file_cleared(_):
return not linux.qcow2_get_backing_file(install_path)
if not linux.wait_callback_success(wait_backing_file_cleared, timeout=60, ignore_exception_in_callback=True):
raise kvmagent.KvmError('block stream succeeded, but backing file is not cleared')
def list_blk_sources(self):
"""list domain blocks (aka. domblklist) -- but with sources only"""
tree = etree.fromstring(self.domain_xml)
res = []
for disk in tree.findall("devices/disk"):
for src in disk.findall("source"):
src_file = src.get("file")
if src_file is None:
continue
res.append(src_file)
return res
def migrate(self, cmd):
if self.state == Vm.VM_STATE_SHUTDOWN:
raise kvmagent.KvmError('vm[uuid:%s] is stopped, cannot live migrate' % cmd.vmUuid)
current_hostname = linux.get_host_name()
if cmd.migrateFromDestination:
hostname = cmd.destHostIp.replace('.', '-')
else:
hostname = cmd.srcHostIp.replace('.', '-')
if current_hostname == 'localhost.localdomain' or current_hostname == 'localhost':
# set the hostname, otherwise the migration will fail
shell.call('hostname %s.zstack.org' % hostname)
destHostIp = cmd.destHostIp
destUrl = "qemu+tcp://{0}/system".format(destHostIp)
tcpUri = "tcp://{0}".format(destHostIp)
flag = (libvirt.VIR_MIGRATE_LIVE |
libvirt.VIR_MIGRATE_PEER2PEER |
libvirt.VIR_MIGRATE_UNDEFINE_SOURCE)
if cmd.autoConverge:
flag |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
if cmd.xbzrle:
flag |= libvirt.VIR_MIGRATE_COMPRESSED
if cmd.storageMigrationPolicy == 'FullCopy':
flag |= libvirt.VIR_MIGRATE_NON_SHARED_DISK
elif cmd.storageMigrationPolicy == 'IncCopy':
flag |= libvirt.VIR_MIGRATE_NON_SHARED_INC
# to work around a libvirt bug (c.f. RHBZ#1494454)
if LIBVIRT_MAJOR_VERSION >= 4:
if any(s.startswith('/dev/') for s in self.list_blk_sources()):
flag |= libvirt.VIR_MIGRATE_UNSAFE
if cmd.useNuma:
flag |= libvirt.VIR_MIGRATE_PERSIST_DEST
stage = get_task_stage(cmd)
timeout = 1800 if cmd.timeout is None else cmd.timeout
class MigrateDaemon(plugin.TaskDaemon):
def __init__(self, domain):
super(MigrateDaemon, self).__init__(cmd, 'MigrateVm', timeout)
self.domain = domain
def _get_percent(self):
try:
stats = self.domain.jobStats()
if libvirt.VIR_DOMAIN_JOB_DATA_REMAINING in stats and libvirt.VIR_DOMAIN_JOB_DATA_TOTAL in stats:
remain = stats[libvirt.VIR_DOMAIN_JOB_DATA_REMAINING]
total = stats[libvirt.VIR_DOMAIN_JOB_DATA_TOTAL]
if total == 0:
return
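# report the already-transferred fraction, capped at 99 so 100% only appears once the job completes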
percent = min(99, 100.0 - remain * 100.0 / total)
return get_exact_percent(percent, stage)
except libvirt.libvirtError:
pass
except:
logger.debug(linux.get_exception_stacktrace())
def _cancel(self):
logger.debug('cancelling vm[uuid:%s] migration' % cmd.vmUuid)
self.domain.abortJob()
def __exit__(self, exc_type, exc_val, exc_tb):
super(MigrateDaemon, self).__exit__(exc_type, exc_val, exc_tb)
if exc_type == libvirt.libvirtError:
raise kvmagent.KvmError(
'unable to migrate vm[uuid:%s] to %s, %s' % (cmd.vmUuid, destUrl, str(exc_val)))
with MigrateDaemon(self.domain):
logger.debug('migrating vm[uuid:{0}] to dest url[{1}]'.format(self.uuid, destUrl))
self.domain.migrateToURI2(destUrl, tcpUri, None, flag, None, 0)
try:
logger.debug('migrating vm[uuid:{0}] to dest url[{1}]'.format(self.uuid, destUrl))
if not linux.wait_callback_success(self.wait_for_state_change, callback_data=None, timeout=timeout):
try: self.domain.abortJob()
except: pass
raise kvmagent.KvmError('timeout after %d seconds' % timeout)
except kvmagent.KvmError:
raise
except:
logger.debug(linux.get_exception_stacktrace())
logger.debug('successfully migrated vm[uuid:{0}] to dest url[{1}]'.format(self.uuid, destUrl))
def _interface_cmd_to_xml(self, cmd, action=None):
vhostSrcPath = cmd.addons['vhostSrcPath'] if cmd.addons else None
brMode = cmd.addons['brMode'] if cmd.addons else None
interface = Vm._build_interface_xml(cmd.nic, None, vhostSrcPath, action, brMode)
def addon():
if cmd.addons and cmd.addons['NicQos']:
qos = cmd.addons['NicQos']
Vm._add_qos_to_interface(interface, qos)
addon()
return etree.tostring(interface)
def _wait_vm_run_until_seconds(self, sec):
vm_pid = linux.find_process_by_cmdline([kvmagent.get_qemu_path(), self.uuid])
if not vm_pid:
raise Exception('cannot find pid for vm[uuid:%s]' % self.uuid)
up_time = linux.get_process_up_time_in_second(vm_pid)
def wait(_):
return linux.get_process_up_time_in_second(vm_pid) > sec
if up_time < sec and not linux.wait_callback_success(wait, timeout=60):
raise Exception("vm[uuid:%s] seems hang, its process[pid:%s] up-time is not increasing after %s seconds" %
(self.uuid, vm_pid, 60))
def attach_iso(self, cmd):
iso = cmd.iso
if iso.deviceId >= len(self.ISO_DEVICE_LETTERS):
err = 'vm[uuid:%s] exceeds max iso limit, device id[%s], but only 0 ~ %d are allowed' % (self.uuid, iso.deviceId, len(self.ISO_DEVICE_LETTERS) - 1)
logger.warn(err)
raise kvmagent.KvmError(err)
device_letter = self.ISO_DEVICE_LETTERS[iso.deviceId]
dev = self._get_iso_target_dev(device_letter)
bus = self._get_controller_type()
if iso.path.startswith('ceph'):
ic = IsoCeph()
ic.iso = iso
cdrom = ic.to_xmlobject(dev, bus)
else:
if iso.path.startswith('sharedblock'):
iso.path = shared_block_to_file(iso.path)
cdrom = etree.Element('disk', {'type': 'file', 'device': 'cdrom'})
e(cdrom, 'driver', None, {'name': 'qemu', 'type': 'raw'})
e(cdrom, 'source', None, {'file': iso.path})
e(cdrom, 'target', None, {'dev': dev, 'bus': bus})
e(cdrom, 'readonly', None)
xml = etree.tostring(cdrom)
if LIBVIRT_MAJOR_VERSION >= 4:
addr = find_domain_cdrom_address(self.domain.XMLDesc(0), dev)
ridx = xml.rindex('<')
xml = xml[:ridx] + addr.dump() + xml[ridx:]
logger.debug('attaching ISO to the vm[uuid:%s]:\n%s' % (self.uuid, xml))
try:
self.domain.updateDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
except libvirt.libvirtError as ex:
err = str(ex)
logger.warn('unable to attach the iso to the VM[uuid:%s], %s' % (self.uuid, err))
if "QEMU command 'change': error connecting: Operation not supported" in err:
raise Exception('cannot hotplug ISO to the VM[uuid:%s]. It is a libvirt bug: %s.'
' You can power off the vm and attach it again.' %
(self.uuid, 'https://bugzilla.redhat.com/show_bug.cgi?id=1541702'))
elif 'timed out waiting for disk tray status update' in err:
raise Exception(
'unable to attach the iso to the VM[uuid:%s]. It seems to have hit an internal error,'
' you can reboot the vm and try again' % self.uuid)
else:
raise Exception('unable to attach the iso to the VM[uuid:%s].' % self.uuid)
def check(_):
me = get_vm_by_uuid(self.uuid)
for disk in me.domain_xmlobject.devices.get_child_node_as_list('disk'):
if disk.device_ == "cdrom" and xmlobject.has_element(disk, 'source'):
if disk.target.dev__ and disk.target.dev_ == dev:
return True
return False
if not linux.wait_callback_success(check, None, 30, 1):
raise Exception('cannot attach the iso[%s] to the VM[uuid:%s]. The device is not present after 30s' %
(iso.path, cmd.vmUuid))
def detach_iso(self, cmd):
cdrom = None
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if disk.device_ == "cdrom":
cdrom = disk
break
if not cdrom:
return
device_letter = self.ISO_DEVICE_LETTERS[cmd.deviceId]
dev = self._get_iso_target_dev(device_letter)
bus = self._get_controller_type()
cdrom = etree.Element('disk', {'type': 'file', 'device': 'cdrom'})
e(cdrom, 'driver', None, {'name': 'qemu', 'type': 'raw'})
e(cdrom, 'target', None, {'dev': dev, 'bus': bus})
e(cdrom, 'readonly', None)
xml = etree.tostring(cdrom)
if LIBVIRT_MAJOR_VERSION >= 4:
addr = find_domain_cdrom_address(self.domain.XMLDesc(0), dev)
ridx = xml.rindex('<')
xml = xml[:ridx] + addr.dump() + xml[ridx:]
logger.debug('detaching ISO from the vm[uuid:%s]:\n%s' % (self.uuid, xml))
try:
self.domain.updateDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_DEVICE_MODIFY_FORCE)
except libvirt.libvirtError as ex:
err = str(ex)
logger.warn('unable to detach the iso from the VM[uuid:%s], %s' % (self.uuid, err))
if 'is locked' in err and 'eject' in err:
raise Exception(
'unable to detach the iso from the VM[uuid:%s]. It seems the ISO is still mounted in the operating system'
', please umount it first' % self.uuid)
else:
raise Exception(
'unable to detach the iso from the VM[uuid:%s]' % self.uuid)
def check(_):
me = get_vm_by_uuid(self.uuid)
for disk in me.domain_xmlobject.devices.get_child_node_as_list('disk'):
if disk.device_ == "cdrom" and xmlobject.has_element(disk, 'source') == False:
if disk.target.dev__ and disk.target.dev_ == dev:
return True
return False
if not linux.wait_callback_success(check, None, 30, 1):
raise Exception('cannot detach the cdrom from the VM[uuid:%s]. The device is still present after 30s' %
self.uuid)
def _get_controller_type(self):
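# index into ('ide', 'sata', 'scsi'): q35 machines use sata, aarch64/mips64el use scsi, everything else ide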
is_q35 = 'q35' in self.domain_xmlobject.os.type.machine_
return ('ide', 'sata', 'scsi')[max(is_q35, (HOST_ARCH in ['aarch64', 'mips64el']) * 2)]
@staticmethod
def _get_iso_target_dev(device_letter):
return "sd%s" % device_letter if (HOST_ARCH in ['aarch64', 'mips64el']) else 'hd%s' % device_letter
@staticmethod
def _get_disk_target_dev_format(bus_type):
return {'virtio': 'vd%s', 'scsi': 'sd%s', 'sata': 'hd%s', 'ide': 'hd%s'}[bus_type]
def hotplug_mem(self, memory_size):
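# hot plug the difference between the requested and current memory as a single DIMM on NUMA node 0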
mem_size = (memory_size - self.get_memory()) / 1024
xml = "<memory model='dimm'><target><size unit='KiB'>%d</size><node>0</node></target></memory>" % mem_size
logger.debug('hot plug memory: %d KiB' % mem_size)
try:
self.domain.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
except libvirt.libvirtError as ex:
err = str(ex)
logger.warn('unable to hotplug memory in vm[uuid:%s], %s' % (self.uuid, err))
if "cannot set up guest memory" in err:
raise kvmagent.KvmError("No enough physical memory for guest")
elif "would exceed domain's maxMemory config" in err:
raise kvmagent.KvmError(err + "; please check if you have rebooted the VM to make NUMA take effect")
else:
raise kvmagent.KvmError(err)
return
def hotplug_cpu(self, cpu_num):
logger.debug('set cpus: %d cpus' % cpu_num)
try:
self.domain.setVcpusFlags(cpu_num, libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
except libvirt.libvirtError as ex:
err = str(ex)
logger.warn('unable to set cpus in vm[uuid:%s], %s' % (self.uuid, err))
if "requested vcpus is greater than max" in err:
err += "; please check if you have rebooted the VM to make NUMA take effect"
raise kvmagent.KvmError(err)
return
@linux.retry(times=3, sleep_time=5)
def _attach_nic(self, cmd):
def check_device(_):
self.refresh()
for iface in self.domain_xmlobject.devices.get_child_node_as_list('interface'):
if iface.mac.address_ == cmd.nic.mac:
# vf nic doesn't have internal name
if cmd.nic.pciDeviceAddress is not None:
return True
else:
return linux.is_network_device_existing(cmd.nic.nicInternalName)
return False
try:
if check_device(None):
return
xml = self._interface_cmd_to_xml(cmd, action='Attach')
logger.debug('attaching nic:\n%s' % xml)
if self.state == self.VM_STATE_RUNNING or self.state == self.VM_STATE_PAUSED:
self.domain.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
else:
self.domain.attachDevice(xml)
if not linux.wait_callback_success(check_device, interval=0.5, timeout=30):
raise Exception('nic device does not show up after 30 seconds')
except:
# check one more time
if not check_device(None):
raise
def attach_nic(self, cmd):
self._wait_vm_run_until_seconds(10)
self.timeout_object.wait_until_object_timeout('%s-attach-nic' % self.uuid)
try:
self._attach_nic(cmd)
except libvirt.libvirtError as ex:
err = str(ex)
if 'Duplicate ID' in err:
err = ('unable to attach an L3 network to the vm[uuid:%s], %s. This is a KVM issue, please reboot'
' the vm and try again' % (self.uuid, err))
elif 'No more available PCI slots' in err:
err = ('vm[uuid: %s] has no more PCI slots for vm nic[mac:%s]. This is a Libvirt issue, please reboot'
' the VM and try again' % (self.uuid, cmd.nic.mac))
else:
err = 'unable to attach an L3 network to the vm[uuid:%s], %s' % (self.uuid, err)
raise kvmagent.KvmError(err)
# in 10 seconds, no detach-nic operation can be performed,
# work around libvirt bug
self.timeout_object.put('%s-detach-nic' % self.uuid, timeout=10)
@linux.retry(times=3, sleep_time=5)
def _detach_nic(self, cmd):
def check_device(_):
self.refresh()
for iface in self.domain_xmlobject.devices.get_child_node_as_list('interface'):
if iface.mac.address_ == cmd.nic.mac:
return False
return shell.run('ip link show dev %s > /dev/null' % cmd.nic.nicInternalName) != 0
if check_device(None):
return
try:
xml = self._interface_cmd_to_xml(cmd, action='Detach')
logger.debug('detaching nic:\n%s' % xml)
if self.state == self.VM_STATE_RUNNING or self.state == self.VM_STATE_PAUSED:
self.domain.detachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
else:
self.domain.detachDevice(xml)
if not linux.wait_callback_success(check_device, interval=0.5, timeout=10):
raise Exception('NIC device is still attached after 10 seconds. Please check the virtio driver, or stop the VM and detach again.')
except:
# check one more time
if not check_device(None):
logger.warn('failed to detach a nic[mac:%s], dump vm xml:\n%s' % (cmd.nic.mac, self.domain_xml))
raise
def detach_nic(self, cmd):
self._wait_vm_run_until_seconds(10)
self.timeout_object.wait_until_object_timeout('%s-detach-nic' % self.uuid)
self._detach_nic(cmd)
# in 10 seconds, no attach-nic operation can be performed,
# to work around libvirt bug
self.timeout_object.put('%s-attach-nic' % self.uuid, timeout=10)
def update_nic(self, cmd):
self._wait_vm_run_until_seconds(10)
self.timeout_object.wait_until_object_timeout('%s-update-nic' % self.uuid)
self._update_nic(cmd)
self.timeout_object.put('%s-update-nic' % self.uuid, timeout=10)
def _update_nic(self, cmd):
if not cmd.nics:
return
def check_device(nic):
self.refresh()
for iface in self.domain_xmlobject.devices.get_child_node_as_list('interface'):
if iface.mac.address_ == nic.mac:
return linux.is_network_device_existing(nic.nicInternalName)
return False
def addon(nic_xml_object):
if cmd.addons and cmd.addons['NicQos'] and cmd.addons['NicQos'][nic.uuid]:
qos = cmd.addons['NicQos'][nic.uuid]
Vm._add_qos_to_interface(nic_xml_object, qos)
for nic in cmd.nics:
interface = Vm._build_interface_xml(nic)
addon(interface)
xml = etree.tostring(interface)
logger.debug('updating nic:\n%s' % xml)
if self.state == self.VM_STATE_RUNNING or self.state == self.VM_STATE_PAUSED:
self.domain.updateDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
else:
self.domain.updateDeviceFlags(xml)
if not linux.wait_callback_success(check_device, nic, interval=0.5, timeout=30):
raise Exception('nic device does not show up after 30 seconds')
def _check_qemuga_info(self, info):
if info:
for command in info["return"]["supported_commands"]:
if command["name"] == "guest-set-user-password":
if command["enabled"]:
return True
return False
def _wait_until_qemuga_ready(self, timeout, uuid):
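# poll 'guest-info' via qemu-agent-command until the agent answers; setting the
# user password requires qemu-guest-agent >= 2.3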
finish_time = time.time() + (timeout / 1000)
while time.time() < finish_time:
state = get_all_vm_states().get(uuid)
if state != Vm.VM_STATE_RUNNING:
raise kvmagent.KvmError("vm's state is %s, not running" % state)
r, o, e = bash.bash_roe("virsh qemu-agent-command %s --cmd '{\"execute\":\"guest-info\"}'" % self.uuid)
if r != 0:
logger.warn("get guest info from vm[uuid:%s]: %s, %s" % (self.uuid, o, e))
else:
logger.debug("qga_json: %s" % o)
info = json.loads(o)['return']
if LooseVersion(info["version"]) < LooseVersion('2.3'):
raise kvmagent.KvmError("You need to install version 2.3 or above to support set user password ,qga current version is %s" % info["version"])
else:
return True
time.sleep(2)
raise kvmagent.KvmError("qemu-agent service is not ready in vm...")
def _escape_char_password(self, password):
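# prepend a backslash to shell-special characters so the password survives the virsh command line below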
escape_str = "\*\#\(\)\<\>\|\"\'\/\\\$\`\&\{\}"
des = ""
for c in list(password):
if c in escape_str:
des += "\\"
des += c
return des
def change_vm_password(self, cmd):
uuid = self.uuid
# check the vm state first, then choose the method accordingly
state = get_all_vm_states().get(uuid)
timeout = 60000
if state == Vm.VM_STATE_RUNNING:
# before set-user-password, we must check whether the guest agent is ready in the guest
self._wait_until_qemuga_ready(timeout, uuid)
try:
escape_password = self._escape_char_password(cmd.accountPerference.accountPassword)
shell.call('virsh set-user-password %s %s %s' % (self.uuid,
cmd.accountPerference.userAccount,
escape_password))
except Exception as e:
logger.warn(e.message)
if e.message.find("child process has failed to set user password") != -1:
logger.warn('user [%s] does not exist!' % cmd.accountPerference.userAccount)
raise kvmagent.KvmError('user [%s] does not exist on vm[uuid: %s]!' % (cmd.accountPerference.userAccount, uuid))
else:
raise e
else:
raise kvmagent.KvmError("vm is not running, cannot connect to qemu-ga")
def merge_snapshot(self, cmd):
target_disk, disk_name = self._get_target_disk(cmd.volume)
@linux.retry(times=3, sleep_time=3)
def do_pull(base, top):
logger.debug('start block rebase [active: %s, new backing: %s]' % (top, base))
# Double check (c.f. issue #1323)
def wait_previous_job(_):
logger.debug('merge snapshot is checking previous block job')
return not self._wait_for_block_job(disk_name, abort_on_error=True)
if not linux.wait_callback_success(wait_previous_job, timeout=21600, ignore_exception_in_callback=True):
raise kvmagent.KvmError('merge snapshot failed - pending previous block job')
self.domain.blockRebase(disk_name, base, 0)
def wait_job(_):
logger.debug('merging snapshot chain is waiting for blockRebase job completion')
return not self._wait_for_block_job(disk_name, abort_on_error=True)
if not linux.wait_callback_success(wait_job, timeout=21600):
raise kvmagent.KvmError('live merging snapshot chain failed, timeout after 6 hours')
# Double check (c.f. issue #757)
if self._get_back_file(top) != base:
raise kvmagent.KvmError('[libvirt bug] live merge snapshot failed')
logger.debug('end block rebase [active: %s, new backing: %s]' % (top, base))
if cmd.fullRebase:
do_pull(None, cmd.destPath)
else:
do_pull(cmd.srcPath, cmd.destPath)
def take_volumes_shallow_backup(self, task_spec, volumes, dst_backup_paths):
if self._is_ft_vm():
self._take_volumes_top_drive_backup(task_spec, volumes, dst_backup_paths)
else:
self._take_volumes_shallow_block_copy(task_spec, volumes, dst_backup_paths)
def _take_volumes_top_drive_backup(self, task_spec, volumes, dst_backup_paths):
class DriveBackupDaemon(plugin.TaskDaemon):
def __init__(self, domain_uuid):
super(DriveBackupDaemon, self).__init__(task_spec, 'TakeVolumeBackup', report_progress=False)
self.domain_uuid = domain_uuid
def __exit__(self, exc_type, exc_val, exc_tb):
super(DriveBackupDaemon, self).__exit__(exc_type, exc_val, exc_tb)
os.unlink(tmp_workspace)
def _cancel(self):
logger.debug("cancel vm[uuid:%s] backup" % self.domain_uuid)
ImageStoreClient().stop_backup_jobs(self.domain_uuid)
def _get_percent(self):
pass
tmp_workspace = os.path.join(tempfile.gettempdir(), uuidhelper.uuid())
with DriveBackupDaemon(self.uuid):
self._do_take_volumes_top_drive_backup(volumes, dst_backup_paths, tmp_workspace)
def _do_take_volumes_top_drive_backup(self, volumes, dst_backup_paths, tmp_workspace):
args = {}
for volume in volumes:
target_disk, _ = self._get_target_disk(volume)
args[str(volume.deviceId)] = VmPlugin.get_backup_device_name(target_disk), 0
dst_workspace = os.path.join(os.path.dirname(dst_backup_paths['0']), 'workspace')
linux.mkdir(dst_workspace)
os.symlink(dst_workspace, tmp_workspace)
res = ImageStoreClient().top_backup_volumes(self.uuid, args.values(), tmp_workspace)
job_res = jsonobject.loads(res)
for device_id, dst_path in dst_backup_paths.items():
device_name = args[device_id][0]
back_path = os.path.join(dst_workspace, job_res[device_name].backupFile)
linux.mkdir(os.path.dirname(dst_path))
shutil.move(back_path, dst_path)
def _take_volumes_shallow_block_copy(self, task_spec, volumes, dst_backup_paths):
# type: (Vm, jsonobject.JsonObject, list[xmlobject.XmlObject], dict[str, str]) -> None
class VolumeInfo(object):
def __init__(self, dev_name):
self.dev_name = dev_name # type: str
self.end_time = None # type: float
class ShallowBackupDaemon(plugin.TaskDaemon):
def __init__(self, domain):
super(ShallowBackupDaemon, self).__init__(task_spec, 'TakeVolumeBackup', report_progress=False)
self.domain = domain
def _cancel(self):
logger.debug("cancel vm[uuid:%s] backup" % self.domain.name())
for v in volume_backup_info.values():
if self.domain.blockJobInfo(v.dev_name, 0):
self.domain.blockJobAbort(v.dev_name)
def _get_percent(self):
pass
volume_backup_info = {}
for volume in volumes:
target_disk, _ = self._get_target_disk(volume)
volume_backup_info[str(volume.deviceId)] = VolumeInfo(target_disk.target.dev_)
with ShallowBackupDaemon(self.domain):
self._do_take_volumes_shallow_block_copy(volume_backup_info, dst_backup_paths)
def _do_take_volumes_shallow_block_copy(self, volume_backup_info, dst_backup_paths):
dom = self.domain
flags = libvirt.VIR_DOMAIN_BLOCK_COPY_TRANSIENT_JOB | libvirt.VIR_DOMAIN_BLOCK_COPY_SHALLOW
for device_id, v in volume_backup_info.items():
vol_dir = os.path.dirname(dst_backup_paths[device_id])
linux.mkdir(vol_dir)
logger.info("start copying {}/{} ...".format(self.uuid, v.dev_name))
dom.blockCopy(v.dev_name, "<disk type='file'><source file='{}'/><driver type='qcow2'/></disk>"
.format(dst_backup_paths[device_id]), None, flags)
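# time.sleep() returns None, so the condition below polls every 5 seconds until all copy jobs report cur == end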
while time.sleep(5) or any(not v.end_time for v in volume_backup_info.values()):
for v in volume_backup_info.values():
if v.end_time:
continue
info = dom.blockJobInfo(v.dev_name, 0)
if not info:
raise Exception('blockjob not found on disk: ' + v.dev_name)
elif info['cur'] == info['end']:
v.end_time = time.time()
logger.info("completed copying {}/{} ...".format(self.uuid, v.dev_name))
with vm_operator.TemporaryPauseVmOperator(dom):
for v in volume_backup_info.values():
dom.blockJobAbort(v.dev_name)
@staticmethod
def from_virt_domain(domain):
vm = Vm()
vm.domain = domain
(state, _, _, _, _) = domain.info()
vm.state = Vm.power_state[state]
vm.domain_xml = domain.XMLDesc(0)
vm.domain_xmlobject = xmlobject.loads(vm.domain_xml)
vm.uuid = vm.domain_xmlobject.name.text_
return vm
@staticmethod
def from_StartVmCmd(cmd):
use_numa = cmd.useNuma
machine_type = get_machineType(cmd.machineType)
if HOST_ARCH == "aarch64" and cmd.bootMode == 'Legacy':
raise kvmagent.KvmError("Aarch64 does not support legacy, please change boot mode to UEFI instead of Legacy on your VM or Image.")
if cmd.architecture and cmd.architecture != HOST_ARCH:
raise kvmagent.KvmError("Image architecture[{}] not matched host architecture[{}].".format(cmd.architecture, HOST_ARCH))
default_bus_type = ('ide', 'sata', 'scsi')[max(machine_type == 'q35', (HOST_ARCH in ['aarch64', 'mips64el']) * 2)]
elements = {}
def make_root():
root = etree.Element('domain')
root.set('type', get_domain_type())
root.set('xmlns:qemu', 'http://libvirt.org/schemas/domain/qemu/1.0')
elements['root'] = root
def make_memory_backing():
root = elements['root']
backing = e(root, 'memoryBacking')
e(backing, "hugepages")
e(backing, "nosharepages")
e(backing, "allocation", attrib={'mode': 'immediate'})
def make_cpu():
if use_numa:
root = elements['root']
tune = e(root, 'cputune')
def on_x86_64():
e(root, 'vcpu', '128', {'placement': 'static', 'current': str(cmd.cpuNum)})
# e(root,'vcpu',str(cmd.cpuNum),{'placement':'static'})
if cmd.nestedVirtualization == 'host-model':
cpu = e(root, 'cpu', attrib={'mode': 'host-model'})
e(cpu, 'model', attrib={'fallback': 'allow'})
elif cmd.nestedVirtualization == 'host-passthrough':
cpu = e(root, 'cpu', attrib={'mode': 'host-passthrough'})
e(cpu, 'model', attrib={'fallback': 'allow'})
elif cmd.nestedVirtualization == 'custom':
cpu = e(root, 'cpu', attrib={'mode': 'custom', 'match': 'minimum'})
e(cpu, 'model', cmd.vmCpuModel, attrib={'fallback': 'allow'})
else:
cpu = e(root, 'cpu')
# e(cpu, 'topology', attrib={'sockets': str(cmd.socketNum), 'cores': str(cmd.cpuOnSocket), 'threads': '1'})
mem = cmd.memory / 1024
e(cpu, 'topology', attrib={'sockets': '32', 'cores': '4', 'threads': '1'})
numa = e(cpu, 'numa')
e(numa, 'cell', attrib={'id': '0', 'cpus': '0-127', 'memory': str(mem), 'unit': 'KiB'})
def on_aarch64():
cpu = e(root, 'cpu', attrib={'mode': 'custom'})
e(cpu, 'model', 'host', attrib={'fallback': 'allow'})
mem = cmd.memory / 1024
e(cpu, 'topology', attrib={'sockets': '32', 'cores': '4', 'threads': '1'})
numa = e(cpu, 'numa')
e(numa, 'cell', attrib={'id': '0', 'cpus': '0-127', 'memory': str(mem), 'unit': 'KiB'})
def on_mips64el():
e(root, 'vcpu', '8', {'placement': 'static', 'current': str(cmd.cpuNum)})
# e(root,'vcpu',str(cmd.cpuNum),{'placement':'static'})
cpu = e(root, 'cpu', attrib={'mode': 'custom', 'match': 'exact', 'check': 'partial'})
e(cpu, 'model', 'Loongson-3A4000-COMP', attrib={'fallback': 'allow'})
mem = cmd.memory / 1024
e(cpu, 'topology', attrib={'sockets': '2', 'cores': '4', 'threads': '1'})
numa = e(cpu, 'numa')
e(numa, 'cell', attrib={'id': '0', 'cpus': '0-7', 'memory': str(mem), 'unit': 'KiB'})
eval("on_{}".format(HOST_ARCH))()
else:
root = elements['root']
# e(root, 'vcpu', '128', {'placement': 'static', 'current': str(cmd.cpuNum)})
e(root, 'vcpu', str(cmd.cpuNum), {'placement': 'static'})
tune = e(root, 'cputune')
# enable nested virtualization
def on_x86_64():
if cmd.nestedVirtualization == 'host-model':
cpu = e(root, 'cpu', attrib={'mode': 'host-model'})
e(cpu, 'model', attrib={'fallback': 'allow'})
elif cmd.nestedVirtualization == 'host-passthrough':
cpu = e(root, 'cpu', attrib={'mode': 'host-passthrough'})
e(cpu, 'model', attrib={'fallback': 'allow'})
elif cmd.nestedVirtualization == 'custom':
cpu = e(root, 'cpu', attrib={'mode': 'custom'})
e(cpu, 'model', cmd.vmCpuModel, attrib={'fallback': 'allow'})
else:
cpu = e(root, 'cpu')
return cpu
def on_aarch64():
if is_virtual_machine():
cpu = e(root, 'cpu')
e(cpu, 'model', 'cortex-a57')
else :
cpu = e(root, 'cpu', attrib={'mode': 'host-passthrough'})
e(cpu, 'model', attrib={'fallback': 'allow'})
return cpu
def on_mips64el():
cpu = e(root, 'cpu', attrib={'mode': 'custom', 'match': 'exact', 'check': 'partial'})
e(cpu, 'model', 'Loongson-3A4000-COMP', attrib={'fallback': 'allow'})
return cpu
cpu = eval("on_{}".format(HOST_ARCH))()
e(cpu, 'topology', attrib={'sockets': str(cmd.socketNum), 'cores': str(cmd.cpuOnSocket), 'threads': '1'})
if cmd.addons.cpuPinning:
for rule in cmd.addons.cpuPinning:
e(tune, 'vcpupin', attrib={'vcpu': str(rule.vCpu), 'cpuset': rule.pCpuSet})
def make_memory():
root = elements['root']
mem = cmd.memory / 1024
if use_numa:
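# with NUMA, declare a large maxMemory (2^35 KiB = 32 TiB) with 16 slots so DIMMs can be hot plugged later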
e(root, 'maxMemory', str(34359738368), {'slots': str(16), 'unit': 'KiB'})
# e(root,'memory',str(mem),{'unit':'k'})
e(root, 'currentMemory', str(mem), {'unit': 'k'})
else:
e(root, 'memory', str(mem), {'unit': 'k'})
e(root, 'currentMemory', str(mem), {'unit': 'k'})
def make_os():
root = elements['root']
os = e(root, 'os')
host_arch = kvmagent.os_arch
def on_x86_64():
e(os, 'type', 'hvm', attrib={'machine': machine_type})
# if boot mode is UEFI
if cmd.bootMode == "UEFI":
e(os, 'loader', '/usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd', attrib={'readonly': 'yes', 'type': 'pflash'})
e(os, 'nvram', '/var/lib/libvirt/qemu/nvram/%s.fd' % cmd.vmInstanceUuid, attrib={'template': '/usr/share/edk2.git/ovmf-x64/OVMF_VARS-pure-efi.fd'})
elif cmd.bootMode == "UEFI_WITH_CSM":
e(os, 'loader', '/usr/share/edk2.git/ovmf-x64/OVMF_CODE-with-csm.fd', attrib={'readonly': 'yes', 'type': 'pflash'})
e(os, 'nvram', '/var/lib/libvirt/qemu/nvram/%s.fd' % cmd.vmInstanceUuid, attrib={'template': '/usr/share/edk2.git/ovmf-x64/OVMF_VARS-with-csm.fd'})
elif cmd.addons['loaderRom'] is not None:
e(os, 'loader', cmd.addons['loaderRom'], {'type': 'rom'})
def on_aarch64():
def on_redhat():
e(os, 'type', 'hvm', attrib={'arch': 'aarch64', 'machine': machine_type})
e(os, 'loader', '/usr/share/edk2/aarch64/QEMU_EFI-pflash.raw', attrib={'readonly': 'yes', 'type': 'pflash'})
e(os, 'nvram', '/var/lib/libvirt/qemu/nvram/%s.fd' % cmd.vmInstanceUuid, attrib={'template': '/usr/share/edk2/aarch64/vars-template-pflash.raw'})
def on_debian():
e(os, 'type', 'hvm', attrib={'arch': 'aarch64', 'machine': machine_type})
e(os, 'loader', '/usr/share/OVMF/QEMU_EFI-pflash.raw', attrib={'readonly': 'yes', 'type': 'rom'})
e(os, 'nvram', '/var/lib/libvirt/qemu/nvram/%s.fd' % cmd.vmInstanceUuid, attrib={'template': '/usr/share/OVMF/vars-template-pflash.raw'})
eval("on_{}".format(kvmagent.get_host_os_type()))()
def on_mips64el():
e(os, 'type', 'hvm', attrib={'arch': 'mips64el', 'machine': 'loongson3a'})
e(os, 'loader', '/usr/share/qemu/ls3a_bios.bin', attrib={'readonly': 'yes', 'type': 'rom'})
eval("on_{}".format(host_arch))()
if cmd.useBootMenu:
e(os, 'bootmenu', attrib={'enable': 'yes'})
if cmd.systemSerialNumber and HOST_ARCH != 'mips64el':
e(os, 'smbios', attrib={'mode': 'sysinfo'})
def make_sysinfo():
if not cmd.systemSerialNumber:
return
root = elements['root']
sysinfo = e(root, 'sysinfo', attrib={'type': 'smbios'})
system = e(sysinfo, 'system')
e(system, 'entry', cmd.systemSerialNumber, attrib={'name': 'serial'})
if cmd.chassisAssetTag is not None:
chassis = e(sysinfo, 'chassis')
e(chassis, 'entry', cmd.chassisAssetTag, attrib={'name': 'asset'})
def make_features():
root = elements['root']
features = e(root, 'features')
for f in ['apic', 'pae']:
e(features, f)
@linux.with_arch(todo_list=['x86_64'])
def make_acpi():
e(features, 'acpi')
make_acpi()
if cmd.kvmHiddenState is True:
kvm = e(features, "kvm")
e(kvm, 'hidden', None, {'state': 'on'})
if cmd.vmPortOff is True:
e(features, 'vmport', attrib={'state': 'off'})
if cmd.emulateHyperV is True:
hyperv = e(features, "hyperv")
e(hyperv, 'relaxed', attrib={'state': 'on'})
e(hyperv, 'vapic', attrib={'state': 'on'})
if is_hv_freq_supported(): e(hyperv, 'frequencies', attrib={'state': 'on'})
e(hyperv, 'spinlocks', attrib={'state': 'on', 'retries': '4096'})
e(hyperv, 'vendor_id', attrib={'state': 'on', 'value': 'ZStack_Org'})
# always set ioapic driver to kvm after libvirt 3.4.0
if is_ioapic_supported():
e(features, "ioapic", attrib={'driver': 'kvm'})
if get_gic_version(cmd.cpuNum) == 2:
e(features, "gic", attrib={'version': '2'})
def make_qemu_commandline():
if not os.path.exists(QMP_SOCKET_PATH):
os.mkdir(QMP_SOCKET_PATH)
root = elements['root']
qcmd = e(root, 'qemu:commandline')
vendor_id, model_name = linux.get_cpu_model()
if "hygon" in model_name.lower():
if isinstance(cmd.imagePlatform, str) and cmd.imagePlatform.lower() not in ["other", "paravirtualization"]:
e(qcmd, "qemu:arg", attrib={"value": "-cpu"})
e(qcmd, "qemu:arg", attrib={"value": "EPYC,vendor=AuthenticAMD,model_id={} Processor,+svm".format(" ".join(model_name.split(" ")[0:3]))})
else:
e(qcmd, "qemu:arg", attrib={"value": "-qmp"})
e(qcmd, "qemu:arg", attrib={"value": "unix:{}/{}.sock,server,nowait".format(QMP_SOCKET_PATH, cmd.vmInstanceUuid)})
args = cmd.addons['qemuCommandLine']
if args is not None:
for arg in args:
e(qcmd, "qemu:arg", attrib={"value": arg.strip('"')})
if cmd.useColoBinary:
e(qcmd, "qemu:arg", attrib={"value": '-L'})
e(qcmd, "qemu:arg", attrib={"value": '/usr/share/qemu-kvm/'})
if cmd.coloPrimary:
e(qcmd, "qemu:arg", attrib={"value": '-L'})
e(qcmd, "qemu:arg", attrib={"value": '/usr/share/qemu-kvm/'})
count = 0
primary_host_ip = cmd.addons['primaryVmHostIp']
for config in cmd.addons['primaryVmNicConfig']:
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=zs-mirror-%s,host=%s,port=%s,server,nowait'
% (count, primary_host_ip, config.mirrorPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=primary-in-s-%s,host=%s,port=%s,server,nowait'
% (count, primary_host_ip, config.primaryInPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=secondary-in-s-%s,host=%s,port=%s,server,nowait'
% (count, primary_host_ip, config.secondaryInPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=primary-in-c-%s,host=%s,port=%s,nowait'
% (count, primary_host_ip, config.primaryInPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=primary-out-s-%s,host=%s,port=%s,server,nowait'
% (count, primary_host_ip, config.primaryOutPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=primary-out-c-%s,host=%s,port=%s,nowait'
% (count, primary_host_ip, config.primaryOutPort)})
count += 1
e(qcmd, "qemu:arg", attrib={"value": '-monitor'})
e(qcmd, "qemu:arg", attrib={"value": 'tcp:%s:%s,server,nowait' % (primary_host_ip, cmd.addons['primaryMonitorPort'])})
elif cmd.coloSecondary:
e(qcmd, "qemu:arg", attrib={"value": '-L'})
e(qcmd, "qemu:arg", attrib={"value": '/usr/share/qemu-kvm/'})
count = 0
for config in cmd.addons['ftSecondaryVmNicConfig']:
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=red-mirror-%s,host=%s,port=%s'
% (count, cmd.addons['primaryVmHostIp'], config.mirrorPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=red-secondary-%s,host=%s,port=%s'
% (count, cmd.addons['primaryVmHostIp'], config.secondaryInPort)})
e(qcmd, "qemu:arg", attrib={"value": '-object'})
e(qcmd, "qemu:arg", attrib={"value": 'filter-redirector,id=fr-mirror-%s,netdev=hostnet%s,queue=tx,'
'indev=red-mirror-%s' % (count, count, count)})
e(qcmd, "qemu:arg", attrib={"value": '-object'})
e(qcmd, "qemu:arg", attrib={"value": 'filter-redirector,id=fr-secondary-%s,netdev=hostnet%s,'
'queue=rx,outdev=red-secondary-%s' % (count, count, count)})
e(qcmd, "qemu:arg", attrib={"value": '-object'})
e(qcmd, "qemu:arg", attrib={"value": 'filter-rewriter,id=rew-%s,netdev=hostnet%s,queue=all'
% (count, count)})
count += 1
block_replication_port = cmd.addons['blockReplicationPort']
secondary_vm_host_ip = cmd.addons['secondaryVmHostIp']
e(qcmd, "qemu:arg", attrib={"value": '-incoming'})
e(qcmd, "qemu:arg", attrib={"value": 'tcp:%s:%s' % (secondary_vm_host_ip, block_replication_port)})
secondary_monitor_port = cmd.addons['secondaryMonitorPort']
e(qcmd, "qemu:arg", attrib={"value": '-monitor'})
e(qcmd, "qemu:arg", attrib={"value": 'tcp:%s:%s,server,nowait' % (secondary_vm_host_ip, secondary_monitor_port)})
def make_devices():
root = elements['root']
devices = e(root, 'devices')
if cmd.addons and cmd.addons['qemuPath']:
e(devices, 'emulator', cmd.addons['qemuPath'])
else:
if cmd.coloPrimary or cmd.coloSecondary or cmd.useColoBinary:
e(devices, 'emulator', kvmagent.get_colo_qemu_path())
else:
e(devices, 'emulator', kvmagent.get_qemu_path())
@linux.with_arch(todo_list=['aarch64', 'mips64el'])
def set_keyboard():
keyboard = e(devices, 'input', None, {'type': 'keyboard', 'bus': 'usb'})
e(keyboard, 'address', None, {'type': 'usb', 'bus': '0', 'port': '2'})
def set_tablet():
tablet = e(devices, 'input', None, {'type': 'tablet', 'bus': 'usb'})
e(tablet, 'address', None, {'type':'usb', 'bus':'0', 'port':'1'})
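# A usb tablet reports absolute pointer coordinates, which keeps the guest
# cursor aligned with VNC/SPICE clients (a relative mouse would drift).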
# appliance vms get no default usb controller and no tablet device; an EHCI controller and keyboard are added explicitly
if cmd.isApplianceVm:
e(devices, 'controller', None, {'type': 'usb', 'model': 'ehci'})
set_keyboard()
else:
set_keyboard()
set_tablet()
elements['devices'] = devices
def make_cdrom():
devices = elements['devices']
max_cdrom_num = len(Vm.ISO_DEVICE_LETTERS)
empty_cdrom_configs = None
if HOST_ARCH in ['aarch64', 'mips64el']:
# SCSI controller only supports 1 bus
empty_cdrom_configs = [
EmptyCdromConfig('sd%s' % Vm.ISO_DEVICE_LETTERS[0], '0', Vm.get_iso_device_unit(0)),
EmptyCdromConfig('sd%s' % Vm.ISO_DEVICE_LETTERS[1], '0', Vm.get_iso_device_unit(1)),
EmptyCdromConfig('sd%s' % Vm.ISO_DEVICE_LETTERS[2], '0', Vm.get_iso_device_unit(2))
]
else:
if cmd.fromForeignHypervisor:
cdroms = cmd.addons['FIXED_CDROMS']
if cdroms is None:
empty_cdrom_configs = [
EmptyCdromConfig('hd%s' % Vm.ISO_DEVICE_LETTERS[0], '0', '1')
]
else:
cdrom_device_id_list = cdroms.split(',')
empty_cdrom_configs = []
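# deviceId i is mapped onto IDE bus i/2, unit i%2, e.g. ids 0..3 become
# (bus 0, unit 0), (bus 0, unit 1), (bus 1, unit 0), (bus 1, unit 1)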
for i in xrange(len(cdrom_device_id_list)):
empty_cdrom_configs.append(
EmptyCdromConfig('hd%s' % Vm.ISO_DEVICE_LETTERS[i], str(i / 2), str(i % 2)))
elif machine_type == 'q35':
# bus 0 unit 0 is already used by the root volume if it is on sata
empty_cdrom_configs = [
EmptyCdromConfig('hd%s' % Vm.ISO_DEVICE_LETTERS[0], '0', '1'),
EmptyCdromConfig('hd%s' % Vm.ISO_DEVICE_LETTERS[1], '0', '2'),
EmptyCdromConfig('hd%s' % Vm.ISO_DEVICE_LETTERS[2], '0', '3'),
]
else: # machine_type=pc
# bus 0 unit 0 is already used by the root volume if it is on ide
empty_cdrom_configs = [
EmptyCdromConfig('hd%s' % Vm.ISO_DEVICE_LETTERS[0], '0', '1'),
EmptyCdromConfig('hd%s' % Vm.ISO_DEVICE_LETTERS[1], '1', '0'),
EmptyCdromConfig('hd%s' % Vm.ISO_DEVICE_LETTERS[2], '1', '1')
]
if len(empty_cdrom_configs) != max_cdrom_num:
logger.error('ISO_DEVICE_LETTERS or EMPTY_CDROM_CONFIGS config error')
def make_empty_cdrom(target_dev, bus, unit, bootOrder):
cdrom = e(devices, 'disk', None, {'type': 'file', 'device': 'cdrom'})
e(cdrom, 'driver', None, {'name': 'qemu', 'type': 'raw'})
e(cdrom, 'target', None, {'dev': target_dev, 'bus': default_bus_type})
e(cdrom, 'address', None, {'type': 'drive', 'bus': bus, 'unit': unit})
e(cdrom, 'readonly', None)
if bootOrder is not None and bootOrder > 0:
e(cdrom, 'boot', None, {'order': str(bootOrder)})
return cdrom
"""
if not cmd.bootIso:
for config in empty_cdrom_configs:
makeEmptyCdrom(config.targetDev, config.bus, config.unit)
return
"""
if not cmd.cdRoms:
return
for iso in cmd.cdRoms:
cdrom_config = empty_cdrom_configs[iso.deviceId]
if iso.isEmpty:
make_empty_cdrom(cdrom_config.targetDev, cdrom_config.bus, cdrom_config.unit, iso.bootOrder)
continue
if iso.path.startswith('ceph'):
ic = IsoCeph()
ic.iso = iso
devices.append(ic.to_xmlobject(cdrom_config.targetDev, default_bus_type, cdrom_config.bus, cdrom_config.unit, iso.bootOrder))
else:
cdrom = make_empty_cdrom(cdrom_config.targetDev, cdrom_config.bus , cdrom_config.unit, iso.bootOrder)
e(cdrom, 'source', None, {'file': iso.path})
def make_volumes():
devices = elements['devices']
# guarantee rootVolume is the first in the list
volumes = [cmd.rootVolume]
volumes.extend(cmd.dataVolumes)
# When platform is "other" and default_bus_type is "ide", the maximum number of volumes is three
volume_ide_configs = [
VolumeIDEConfig('0', '0'),
VolumeIDEConfig('1', '1'),
VolumeIDEConfig('1', '0')
]
def quorumbased_volume(_dev_letter, _v):
def make_backingstore(volume_path):
disk = etree.Element('disk', {'type': 'quorum', 'device': 'disk', 'threshold': '1', 'mode': 'primary' if cmd.coloPrimary else 'secondary'})
paths = linux.qcow2_get_file_chain(volume_path)
if len(paths) == 0:
# could not read qcow2
raise Exception("could not read qcow2")
backingStore = None
for path in paths:
logger.debug('disk path %s' % path)
xml = etree.tostring(disk)
logger.debug('disk xml is %s' % xml)
if backingStore:
backingStore = e(backingStore, 'backingStore', None, {'type': 'file'})
else:
backingStore = e(disk, 'backingStore', None, {'type': 'file'})
e(backingStore, 'format', None, {'type': 'qcow2'})
xml = etree.tostring(disk)
logger.debug('disk xml is %s' % xml)
if cmd.coloSecondary:
e(backingStore, 'active', None, {'file': cmd.cacheVolumes[0].installPath})
e(backingStore, 'hidden', None, {'file': cmd.cacheVolumes[1].installPath})
e(backingStore, 'source', None, {'file': path})
return disk
disk = make_backingstore(_v.installPath)
if _v.useVirtio:
e(disk, 'target', None, {'dev': 'vd%s' % _dev_letter, 'bus': 'virtio'})
else:
dev_format = Vm._get_disk_target_dev_format(default_bus_type)
e(disk, 'target', None, {'dev': dev_format % _dev_letter, 'bus': default_bus_type})
if default_bus_type == "ide" and cmd.imagePlatform.lower() == "other":
allocate_ide_config(disk)
return disk
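# Sketch of the quorum disk element built above (paths illustrative; one
# nested backingStore level is emitted per qcow2 in the backing chain):
#   <disk type='quorum' device='disk' threshold='1' mode='primary'>
#     <backingStore type='file'>
#       <format type='qcow2'/>
#       <source file='/path/to/base.qcow2'/>
#     </backingStore>
#     <target dev='vda' bus='virtio'/>
#   </disk>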
def filebased_volume(_dev_letter, _v):
disk = etree.Element('disk', {'type': 'file', 'device': 'disk', 'snapshot': 'external'})
if cmd.addons and cmd.addons['useDataPlane'] is True:
e(disk, 'driver', None, {'name': 'qemu', 'type': linux.get_img_fmt(_v.installPath), 'cache': _v.cacheMode, 'queues':'1', 'dataplane': 'on'})
else:
e(disk, 'driver', None, {'name': 'qemu', 'type': linux.get_img_fmt(_v.installPath), 'cache': _v.cacheMode})
e(disk, 'source', None, {'file': _v.installPath})
if _v.shareable:
e(disk, 'shareable')
if _v.useVirtioSCSI:
e(disk, 'target', None, {'dev': 'sd%s' % _dev_letter, 'bus': 'scsi'})
e(disk, 'wwn', _v.wwn)
return disk
if _v.useVirtio:
e(disk, 'target', None, {'dev': 'vd%s' % _dev_letter, 'bus': 'virtio'})
else:
dev_format = Vm._get_disk_target_dev_format(default_bus_type)
e(disk, 'target', None, {'dev': dev_format % _dev_letter, 'bus': default_bus_type})
if default_bus_type == "ide" and cmd.imagePlatform.lower() == "other":
allocate_ide_config(disk)
return disk
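# Example of the resulting file-based disk (virtio + qcow2 case; path and
# cache value illustrative):
#   <disk type='file' device='disk' snapshot='external'>
#     <driver name='qemu' type='qcow2' cache='none'/>
#     <source file='/var/lib/zstack/volume.qcow2'/>
#     <target dev='vda' bus='virtio'/>
#   </disk>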
def iscsibased_volume(_dev_letter, _v):
def blk_iscsi():
bi = BlkIscsi()
portal, bi.target, bi.lun = _v.installPath.lstrip('iscsi://').split('/')
bi.server_hostname, bi.server_port = portal.split(':')
bi.device_letter = _dev_letter
bi.volume_uuid = _v.volumeUuid
bi.chap_username = _v.chapUsername
bi.chap_password = _v.chapPassword
return bi.to_xmlobject()
def virtio_iscsi():
vi = VirtioIscsi()
portal, vi.target, vi.lun = _v.installPath.lstrip('iscsi://').split('/')
vi.server_hostname, vi.server_port = portal.split(':')
vi.device_letter = _dev_letter
vi.volume_uuid = _v.volumeUuid
vi.chap_username = _v.chapUsername
vi.chap_password = _v.chapPassword
return vi.to_xmlobject()
if _v.useVirtio:
return virtio_iscsi()
else:
return blk_iscsi()
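# installPath is expected to look like iscsi://<host>:<port>/<target>/<lun>.
# Note that str.lstrip('iscsi://') strips *characters* from the set
# {i, s, c, :, /}, not the literal prefix; it works here because hostnames
# and IPs do not start with those characters, but a prefix slice would be safer.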
def ceph_volume(_dev_letter, _v):
def ceph_virtio():
vc = VirtioCeph()
vc.volume = _v
vc.dev_letter = _dev_letter
return vc.to_xmlobject()
def ceph_blk():
ic = BlkCeph()
ic.volume = _v
ic.dev_letter = _dev_letter
ic.bus_type = default_bus_type
return ic.to_xmlobject()
def ceph_virtio_scsi():
vsc = VirtioSCSICeph()
vsc.volume = _v
vsc.dev_letter = _dev_letter
return vsc.to_xmlobject()
def build_ceph_disk():
if _v.useVirtioSCSI:
disk = ceph_virtio_scsi()
if _v.shareable:
e(disk, 'shareable')
return disk
if _v.useVirtio:
return ceph_virtio()
else:
disk = ceph_blk()
if default_bus_type == "ide" and cmd.imagePlatform.lower() == "other":
allocate_ide_config(disk)
return disk
d = build_ceph_disk()
if _v.physicalBlockSize:
e(d, 'blockio', None, {'physical_block_size': str(_v.physicalBlockSize)})
return d
def spool_volume(_dev_letter, _v):
imgfmt = linux.get_img_fmt(_v.installPath)
disk = etree.Element('disk', {'type': 'network', 'device': 'disk'})
e(disk, 'driver', None,
{'name': 'qemu', 'type': 'raw', 'cache': 'none', 'io': 'native'})
e(disk, 'source', None,
{'protocol': 'spool', 'name': make_spool_conf(imgfmt, _dev_letter, _v)})
e(disk, 'target', None, {'dev': 'vd%s' % _dev_letter, 'bus': 'virtio'})
return disk
def block_volume(_dev_letter, _v):
disk = etree.Element('disk', {'type': 'block', 'device': 'disk', 'snapshot': 'external'})
e(disk, 'driver', None,
{'name': 'qemu', 'type': 'raw', 'cache': 'none', 'io': 'native'})
e(disk, 'source', None, {'dev': _v.installPath})
if _v.useVirtioSCSI:
e(disk, 'target', None, {'dev': 'sd%s' % _dev_letter, 'bus': 'scsi'})
e(disk, 'wwn', _v.wwn)
else:
e(disk, 'target', None, {'dev': 'vd%s' % _dev_letter, 'bus': 'virtio'})
return disk
def volume_qos(volume_xml_obj):
if not cmd.addons:
return
vol_qos = cmd.addons['VolumeQos']
if not vol_qos:
return
qos = vol_qos[v.volumeUuid]
if not qos:
return
if not qos.totalBandwidth and not qos.totalIops:
return
iotune = e(volume_xml_obj, 'iotune')
if qos.totalBandwidth:
e(iotune, 'total_bytes_sec', str(qos.totalBandwidth))
if qos.totalIops:
# e(iotune, 'total_iops_sec', str(qos.totalIops))
e(iotune, 'read_iops_sec', str(qos.totalIops))
e(iotune, 'write_iops_sec', str(qos.totalIops))
# e(iotune, 'read_iops_sec_max', str(qos.totalIops))
# e(iotune, 'write_iops_sec_max', str(qos.totalIops))
# e(iotune, 'total_iops_sec_max', str(qos.totalIops))
def volume_native_aio(volume_xml_obj):
if not cmd.addons:
return
vol_aio = cmd.addons['NativeAio']
if not vol_aio:
return
drivers = volume_xml_obj.getiterator("driver")
if drivers is None or len(drivers) == 0:
return
drivers[0].set("io", "native")
def allocate_ide_config(_disk):
if len(volume_ide_configs) == 0:
err = "insufficient IDE addresses."
logger.warn(err)
raise kvmagent.KvmError(err)
volume_ide_config = volume_ide_configs.pop(0)
e(_disk, 'address', None, {'type': 'drive', 'bus': volume_ide_config.bus, 'unit': volume_ide_config.unit})
if default_bus_type == "ide" and cmd.imagePlatform.lower() == "other":
Vm.DEVICE_LETTERS = Vm.DEVICE_LETTERS.replace('de', '')
volumes.sort(key=lambda d: d.deviceId)
scsi_device_ids = [v.deviceId for v in volumes if v.useVirtioSCSI]
for v in volumes:
if v.deviceId >= len(Vm.DEVICE_LETTERS):
err = "exceeds max disk limit, device id[%s], but only 0 ~ %d are allowed" % (v.deviceId, len(Vm.DEVICE_LETTERS) - 1)
logger.warn(err)
raise kvmagent.KvmError(err)
dev_letter = Vm.DEVICE_LETTERS[v.deviceId]
if v.useVirtioSCSI:
scsi_device_id = scsi_device_ids.pop()
if scsi_device_id >= len(Vm.DEVICE_LETTERS):
err = "exceeds max disk limit, device id[%s], but only 0 ~ %d are allowed" % (scsi_device_id, len(Vm.DEVICE_LETTERS) - 1)
logger.warn(err)
raise kvmagent.KvmError(err)
dev_letter = Vm.DEVICE_LETTERS[scsi_device_id]
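# NB: scsi_device_ids.pop() takes from the tail of the deviceId-sorted list,
# so virtio-SCSI dev letters are assigned from the highest id downwards;
# presumably intentional, but worth noting when tracing target names.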
if v.deviceType == 'quorum':
vol = quorumbased_volume(dev_letter, v)
elif v.deviceType == 'file':
vol = filebased_volume(dev_letter, v)
elif v.deviceType == 'iscsi':
vol = iscsibased_volume(dev_letter, v)
elif v.deviceType == 'ceph':
vol = ceph_volume(dev_letter, v)
elif v.deviceType == 'block':
vol = block_volume(dev_letter, v)
elif v.deviceType == 'spool':
vol = spool_volume(dev_letter, v)
else:
raise Exception('unknown volume deviceType: %s' % v.deviceType)
assert vol is not None, 'vol cannot be None'
Vm.set_device_address(vol, v)
if v.bootOrder is not None and v.bootOrder > 0 and v.deviceId == 0:
e(vol, 'boot', None, {'order': str(v.bootOrder)})
Vm.set_volume_qos(cmd.addons, v.volumeUuid, vol)
Vm.set_volume_serial_id(v.volumeUuid, vol)
volume_native_aio(vol)
devices.append(vol)
def make_nics():
if not cmd.nics:
return
def addon(nic_xml_object):
if cmd.addons and cmd.addons['NicQos'] and cmd.addons['NicQos'][nic.uuid]:
qos = cmd.addons['NicQos'][nic.uuid]
Vm._add_qos_to_interface(nic_xml_object, qos)
if cmd.coloPrimary or cmd.coloSecondary:
Vm._ignore_colo_vm_nic_rom_file_on_interface(nic_xml_object)
devices = elements['devices']
vhostSrcPath = cmd.addons['vhostSrcPath'] if cmd.addons else None
brMode = cmd.addons['brMode'] if cmd.addons else None
for index, nic in enumerate(cmd.nics):
interface = Vm._build_interface_xml(nic, devices, vhostSrcPath, 'Attach', brMode, index)
addon(interface)
def make_meta():
root = elements['root']
e(root, 'name', cmd.vmInstanceUuid)
if cmd.coloPrimary or cmd.coloSecondary:
e(root, 'iothreads', str(len(cmd.nics)))
e(root, 'uuid', uuidhelper.to_full_uuid(cmd.vmInstanceUuid))
e(root, 'description', cmd.vmName)
e(root, 'on_poweroff', 'destroy')
e(root, 'on_reboot', 'restart')
on_crash = cmd.addons['onCrash']
if on_crash is None:
on_crash = 'restart'
e(root, 'on_crash', on_crash)
meta = e(root, 'metadata')
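# NB: 'usenamesapce' (sic) below matches the keyword name of the local
# helper e(); renaming it here alone would break the call.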
zs = e(meta, 'zstack', usenamesapce=True)
e(zs, 'internalId', str(cmd.vmInternalId))
e(zs, 'hostManagementIp', str(cmd.hostManagementIp))
# <clock offset="utc" />
clock = e(root, 'clock', None, {'offset': cmd.clock})
# <rom bar='off'/>
if cmd.clock == 'localtime':
if cmd.clockTrack:
e(clock, 'timer', None, {'name': 'rtc', 'tickpolicy': 'catchup', 'track': cmd.clockTrack})
else:
e(clock, 'timer', None, {'name': 'rtc', 'tickpolicy': 'catchup'})
e(clock, 'timer', None, {'name': 'pit', 'tickpolicy': 'delay'})
e(clock, 'timer', None, {'name': 'hpet', 'present': 'no'})
e(clock, 'timer', None, {'name': 'hypervclock', 'present': 'yes'})
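# For offset='localtime' the clock section ends up roughly as:
#   <clock offset='localtime'>
#     <timer name='rtc' tickpolicy='catchup'/>
#     <timer name='pit' tickpolicy='delay'/>
#     <timer name='hpet' present='no'/>
#     <timer name='hypervclock' present='yes'/>
#   </clock>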
def make_vnc():
devices = elements['devices']
if cmd.consolePassword is None:
vnc = e(devices, 'graphics', None, {'type': 'vnc', 'port': '5900', 'autoport': 'yes'})
else:
vnc = e(devices, 'graphics', None,
{'type': 'vnc', 'port': '5900', 'autoport': 'yes', 'passwd': str(cmd.consolePassword)})
e(vnc, "listen", None, {'type': 'address', 'address': '0.0.0.0'})
def make_spice():
devices = elements['devices']
if cmd.consolePassword is None:
spice = e(devices, 'graphics', None, {'type': 'spice', 'port': '5900', 'autoport': 'yes'})
else:
spice = e(devices, 'graphics', None,
{'type': 'spice', 'port': '5900', 'autoport': 'yes', 'passwd': str(cmd.consolePassword)})
e(spice, "listen", None, {'type': 'address', 'address': '0.0.0.0'})
if is_spice_tls() == 0 and cmd.spiceChannels is not None:
for channel in cmd.spiceChannels:
e(spice, "channel", None, {'name': channel, 'mode': "secure"})
e(spice, "image", None, {'compression': 'auto_glz'})
e(spice, "jpeg", None, {'compression': 'always'})
e(spice, "zlib", None, {'compression': 'never'})
e(spice, "playback", None, {'compression': 'off'})
e(spice, "streaming", None, {'mode': cmd.spiceStreamingMode})
e(spice, "mouse", None, {'mode': 'client'})
e(spice, "filetransfer", None, {'enable': 'yes'})
e(spice, "clipboard", None, {'copypaste': 'yes'})
def make_folder_sharing():
devices = elements['devices']
chan = e(devices, 'channel', None, {'type': 'spiceport'})
e(chan, 'source', None, {'channel': 'org.spice-space.webdav.0'})
e(chan, 'target', None, {'type': 'virtio', 'name': 'org.spice-space.webdav.0'})
def make_usb_redirect():
devices = elements['devices']
e(devices, 'controller', None, {'type': 'usb', 'index': '0'})
# make sure there are three usb controllers, each for USB 1.1/2.0/3.0
@linux.on_redhat_based(DIST_NAME)
@linux.with_arch(todo_list=['aarch64'])
def set_default():
# for aarch64 centos, only the default controller (qemu-xhci, USB 3.0) is supported on the current qemu version (2.12_0-18)
e(devices, 'controller', None, {'type': 'usb', 'index': '1'})
e(devices, 'controller', None, {'type': 'usb', 'index': '2'})
return True
def set_usb2_3():
e(devices, 'controller', None, {'type': 'usb', 'index': '1', 'model': 'ehci'})
e(devices, 'controller', None, {'type': 'usb', 'index': '2', 'model': 'nec-xhci'})
# USB2.0 Controller for redirect
e(devices, 'controller', None, {'type': 'usb', 'index': '3', 'model': 'ehci'})
e(devices, 'controller', None, {'type': 'usb', 'index': '4', 'model': 'nec-xhci'})
def set_redirdev():
chan = e(devices, 'channel', None, {'type': 'spicevmc'})
e(chan, 'target', None, {'type': 'virtio', 'name': 'com.redhat.spice.0'})
e(chan, 'address', None, {'type': 'virtio-serial'})
redirdev1 = e(devices, 'redirdev', None, {'type': 'spicevmc', 'bus': 'usb'})
e(redirdev1, 'address', None, {'type': 'usb', 'bus': '3', 'port': '1'})
redirdev2 = e(devices, 'redirdev', None, {'type': 'spicevmc', 'bus': 'usb'})
e(redirdev2, 'address', None, {'type': 'usb', 'bus': '3', 'port': '2'})
redirdev3 = e(devices, 'redirdev', None, {'type': 'spicevmc', 'bus': 'usb'})
e(redirdev3, 'address', None, {'type': 'usb', 'bus': '4', 'port': '1'})
redirdev4 = e(devices, 'redirdev', None, {'type': 'spicevmc', 'bus': 'usb'})
e(redirdev4, 'address', None, {'type': 'usb', 'bus': '4', 'port': '2'})
if set_default():
return
set_usb2_3()
set_redirdev()
def make_video():
devices = elements['devices']
if HOST_ARCH == 'aarch64':
video = e(devices, 'video')
e(video, 'model', None, {'type': 'virtio'})
elif cmd.videoType != "qxl":
video = e(devices, 'video')
e(video, 'model', None, {'type': str(cmd.videoType)})
else:
for monitor in range(cmd.VDIMonitorNumber):
video = e(devices, 'video')
if cmd.qxlMemory is not None:
e(video, 'model', None, {'type': str(cmd.videoType), 'ram': str(cmd.qxlMemory.ram), 'vram': str(cmd.qxlMemory.vram),
'vgamem': str(cmd.qxlMemory.vgamem)})
else:
e(video, 'model', None, {'type': str(cmd.videoType)})
def make_sound():
if cmd.consoleMode == 'spice' or cmd.consoleMode == 'vncAndSpice':
devices = elements['devices']
if cmd.soundType is not None:
e(devices, 'sound', None, {'model': str(cmd.soundType)})
else:
e(devices, 'sound', None, {'model': 'ich6'})
def make_graphic_console():
if cmd.consoleMode == 'spice':
make_spice()
elif cmd.consoleMode == "vnc":
make_vnc()
elif cmd.consoleMode == "vncAndSpice":
make_vnc()
make_spice()
else:
return
def make_addons():
if not cmd.addons:
return
devices = elements['devices']
channel = cmd.addons['channel']
if channel:
basedir = os.path.dirname(channel.socketPath)
linux.mkdir(basedir, 0777)
chan = e(devices, 'channel', None, {'type': 'unix'})
e(chan, 'source', None, {'mode': 'bind', 'path': channel.socketPath})
e(chan, 'target', None, {'type': 'virtio', 'name': channel.targetName})
cephSecretKey = cmd.addons['ceph_secret_key']
cephSecretUuid = cmd.addons['ceph_secret_uuid']
if cephSecretKey and cephSecretUuid:
VmPlugin._create_ceph_secret_key(cephSecretKey, cephSecretUuid)
pciDevices = cmd.addons['pciDevice']
if pciDevices:
make_pci_device(pciDevices)
mdevDevices = cmd.addons['mdevDevice']
if mdevDevices:
make_mdev_device(mdevDevices)
storageDevices = cmd.addons['storageDevice']
if storageDevices:
make_storage_device(storageDevices)
usbDevices = cmd.addons['usbDevice']
if usbDevices:
make_usb_device(usbDevices)
# FIXME: manage scsi device in one place.
def make_storage_device(storageDevices):
lvm.unpriv_sgio()
devices = elements['devices']
for volume in storageDevices:
if match_storage_device(volume.installPath):
disk = e(devices, 'disk', None, attrib={'type': 'block', 'device': 'lun', 'sgio': get_sgio_value()})
e(disk, 'driver', None, {'name': 'qemu', 'type': 'raw'})
e(disk, 'source', None, {'dev': volume.installPath})
e(disk, 'target', None, {'dev': 'sd%s' % Vm.DEVICE_LETTERS[volume.deviceId], 'bus': 'scsi'})
Vm.set_device_address(disk, volume)
def make_pci_device(pciDevices):
devices = elements['devices']
for pci in pciDevices:
addr, spec_uuid = pci.split(',')
ret, out, err = bash.bash_roe("virsh nodedev-detach pci_%s" % addr.replace(':', '_').replace('.', '_'))
if ret != 0:
raise kvmagent.KvmError('failed to nodedev-detach %s: %s, %s' % (addr, out, err))
if match_pci_device(addr):
hostdev = e(devices, "hostdev", None, {'mode': 'subsystem', 'type': 'pci', 'managed': 'no'})
e(hostdev, "driver", None, {'name': 'vfio'})
source = e(hostdev, "source")
e(source, "address", None, {
"domain": hex(0) if len(addr.split(":")) == 2 else hex(int(addr.split(":")[0], 16)),
"bus": hex(int(addr.split(":")[-2], 16)),
"slot": hex(int(addr.split(":")[-1].split(".")[0], 16)),
"function": hex(int(addr.split(":")[-1].split(".")[1], 16))
})
else:
raise kvmagent.KvmError(
'can not find pci device for address %s' % addr)
if spec_uuid:
rom_file = os.path.join(PCI_ROM_PATH, spec_uuid)
# only turn bar on when rom file exists
if os.path.exists(rom_file):
e(hostdev, "rom", None, {'bar': 'on', 'file': rom_file})
def make_mdev_device(mdevUuids):
devices = elements['devices']
for mdevUuid in mdevUuids:
hostdev = e(devices, "hostdev", None, {'mode': 'subsystem', 'type': 'mdev', 'model': 'vfio-pci', 'managed': 'yes'})
source = e(hostdev, "source")
# convert mdevUuid to 8-4-4-4-12 format
e(source, "address", None, { "uuid": uuidhelper.to_full_uuid(mdevUuid) })
def make_usb_device(usbDevices):
if HOST_ARCH in ['aarch64', 'mips64el']:
next_uhci_port = 3
else:
next_uhci_port = 2
next_ehci_port = 1
next_xhci_port = 1
devices = elements['devices']
for usb in usbDevices:
if match_usb_device(usb):
if usb.split(":")[5] == "PassThrough":
hostdev = e(devices, "hostdev", None, {'mode': 'subsystem', 'type': 'usb', 'managed': 'yes'})
source = e(hostdev, "source")
e(source, "address", None, {
"bus": str(int(usb.split(":")[0])),
"device": str(int(usb.split(":")[1]))
})
e(source, "vendor", None, {
"id": hex(int(usb.split(":")[2], 16))
})
e(source, "product", None, {
"id": hex(int(usb.split(":")[3], 16))
})
# get controller index from usbVersion
# eg. 1.1 -> 0
# eg. 2.0.0 -> 1
# eg. 3 -> 2
bus = int(usb.split(":")[4][0]) - 1
if bus == 0:
address = e(hostdev, "address", None, {'type': 'usb', 'bus': str(bus), 'port': str(next_uhci_port)})
next_uhci_port += 1
elif bus == 1:
address = e(hostdev, "address", None, {'type': 'usb', 'bus': str(bus), 'port': str(next_ehci_port)})
next_ehci_port += 1
elif bus == 2:
address = e(hostdev, "address", None, {'type': 'usb', 'bus': str(bus), 'port': str(next_xhci_port)})
next_xhci_port += 1
else:
raise kvmagent.KvmError('unknown usb controller %s' % bus)
if usb.split(":")[5] == "Redirect":
redirdev = e(devices, "redirdev", None, {'bus': 'usb', 'type': 'tcp'})
source = e(redirdev, "source", None, {'mode': 'connect', 'host': usb.split(":")[7], 'service': usb.split(":")[6]})
# get controller index from usbVersion
# eg. 1.1 -> 0
# eg. 2.0.0 -> 1
# eg. 3 -> 2
bus = int(usb.split(":")[4][0]) - 1
if bus == 0:
address = e(redirdev, "address", None,
{'type': 'usb', 'bus': str(bus), 'port': str(next_uhci_port)})
next_uhci_port += 1
elif bus == 1:
address = e(redirdev, "address", None,
{'type': 'usb', 'bus': str(bus), 'port': str(next_ehci_port)})
next_ehci_port += 1
elif bus == 2:
address = e(redirdev, "address", None,
{'type': 'usb', 'bus': str(bus), 'port': str(next_xhci_port)})
next_xhci_port += 1
else:
raise kvmagent.KvmError('unknown usb controller %s' % bus)
else:
raise kvmagent.KvmError('cannot find usb device %s' % usb)
#TODO(weiw) validate here
def match_storage_device(install_path):
return True
# TODO(WeiW) Validate here
def match_pci_device(addr):
return True
def match_usb_device(addr):
if len(addr.split(':')) == 8:
return True
else:
return False
def make_balloon_memory():
if cmd.addons and cmd.addons['useMemBalloon'] is False:
return
devices = elements['devices']
b = e(devices, 'memballoon', None, {'model': 'virtio'})
e(b, 'stats', None, {'period': '10'})
if kvmagent.get_host_os_type() == "debian":
e(b, 'address', None, {'type': 'pci', 'controller': '0', 'bus': '0x00', 'slot': '0x04', 'function':'0x0'})
def make_console():
devices = elements['devices']
if cmd.consoleLogToFile:
logfilename = '%s-vm-kernel.log' % cmd.vmInstanceUuid
logpath = os.path.join(tempfile.gettempdir(), logfilename)
serial = e(devices, 'serial', None, {'type': 'file'})
e(serial, 'target', None, {'port': '0'})
e(serial, 'source', None, {'path': logpath})
console = e(devices, 'console', None, {'type': 'file'})
e(console, 'target', None, {'type': 'serial', 'port': '0'})
e(console, 'source', None, {'path': logpath})
else:
serial = e(devices, 'serial', None, {'type': 'pty'})
e(serial, 'target', None, {'port': '0'})
console = e(devices, 'console', None, {'type': 'pty'})
e(console, 'target', None, {'type': 'serial', 'port': '0'})
def make_sec_label():
root = elements['root']
e(root, 'seclabel', None, {'type': 'none'})
def make_controllers():
devices = elements['devices']
e(devices, 'controller', None, {'type': 'scsi', 'model': 'virtio-scsi'})
if machine_type in ['q35', 'virt']:
controller = e(devices, 'controller', None, {'type': 'sata', 'index': '0'})
e(controller, 'alias', None, {'name': 'sata'})
e(controller, 'address', None, {'type': 'pci', 'domain': '0', 'bus': '0', 'slot': '0x1f', 'function': '2'})
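# Controller index layout for q35/virt: index 0 -> pcie-root,
# index 1 -> dmi-to-pci-bridge, the next predefinedPciBridgeNum indexes ->
# pci-bridge, and the remaining (pciePortNums + 1 - predefinedPciBridgeNum)
# indexes -> pcie-root-port.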
pci_idx_generator = iter(range(cmd.pciePortNums + 3))
e(devices, 'controller', None, {'type': 'pci', 'model': 'pcie-root', 'index': str(pci_idx_generator.next())})
e(devices, 'controller', None, {'type': 'pci', 'model': 'dmi-to-pci-bridge', 'index': str(pci_idx_generator.next())})
for _ in xrange(cmd.predefinedPciBridgeNum):
e(devices, 'controller', None, {'type': 'pci', 'model': 'pci-bridge', 'index': str(pci_idx_generator.next())})
for i in pci_idx_generator:
e(devices, 'controller', None, {'type': 'pci', 'model': 'pcie-root-port', 'index': str(i)})
else:
if not cmd.predefinedPciBridgeNum or HOST_ARCH == 'mips64el':
return
for i in xrange(cmd.predefinedPciBridgeNum):
e(devices, 'controller', None, {'type': 'pci', 'index': str(i + 1), 'model': 'pci-bridge'})
make_root()
make_meta()
make_cpu()
make_memory()
make_os()
make_sysinfo()
make_features()
make_devices()
make_video()
make_sound()
make_nics()
make_volumes()
if not cmd.addons or cmd.addons['noConsole'] is not True:
make_graphic_console()
make_addons()
make_balloon_memory()
make_console()
make_sec_label()
make_controllers()
if is_spiceport_driver_supported() and cmd.consoleMode in ["spice", "vncAndSpice"] and not cmd.coloPrimary and not cmd.coloSecondary:
make_folder_sharing()
# appliance vm doesn't need any cdrom or usb controller
if not cmd.isApplianceVm:
make_cdrom()
if not cmd.coloPrimary and not cmd.coloSecondary and not cmd.useColoBinary:
make_usb_redirect()
if cmd.additionalQmp:
make_qemu_commandline()
if cmd.useHugePage:
make_memory_backing()
root = elements['root']
xml = etree.tostring(root)
vm = Vm()
vm.uuid = cmd.vmInstanceUuid
if cmd.addons["userDefinedXml"] is not None:
vm.domain_xml = base64.b64decode(cmd.addons["userDefinedXml"])
vm.domain_xmlobject = xmlobject.loads(vm.domain_xml)
else:
vm.domain_xml = xml
vm.domain_xmlobject = xmlobject.loads(xml)
return vm
@staticmethod
def _build_interface_xml(nic, devices=None, vhostSrcPath=None, action=None, brMode=None, index=0):
if nic.pciDeviceAddress is not None:
iftype = 'hostdev'
device_attr = {'type': iftype, 'managed': 'yes'}
elif vhostSrcPath is not None:
iftype = 'vhostuser'
device_attr = {'type': iftype}
else:
iftype = 'bridge'
device_attr = {'type': iftype}
if devices:
interface = e(devices, 'interface', None, device_attr)
else:
interface = etree.Element('interface', attrib=device_attr)
e(interface, 'mac', None, attrib={'address': nic.mac})
e(interface, 'alias', None, {'name': 'net%s' % nic.nicInternalName.split('.')[1]})
if iftype != 'hostdev':
e(interface, 'mtu', None, attrib={'size': '%d' % nic.mtu})
if iftype == 'hostdev':
domain, bus, slot, function = parse_pci_device_address(nic.pciDeviceAddress)
source = e(interface, 'source')
e(source, 'address', None, attrib={'type': 'pci', 'domain': '0x' + domain, 'bus': '0x' + bus, 'slot': '0x' + slot, 'function': '0x' + function})
e(interface, 'driver', None, attrib={'name': 'vfio'})
if nic.vlanId is not None:
vlan = e(interface, 'vlan')
e(vlan, 'tag', None, attrib={'id': str(nic.vlanId)})
elif iftype == 'vhostuser':
if brMode != 'mocbr':
e(interface, 'source', None, attrib={'type': 'unix', 'path': vhostSrcPath, 'mode': 'client'})
e(interface, 'driver', None, attrib={'queues': '16', 'vhostforce': 'on'})
else:
e(interface, 'source', None, attrib={'type': 'unix', 'path': '/var/run/phynic{}'.format(index+1), 'mode':'server'})
e(interface, 'driver', None, attrib={'queues': '8'})
else:
e(interface, 'source', None, attrib={'bridge': nic.bridgeName})
e(interface, 'target', None, attrib={'dev': nic.nicInternalName})
if nic.pci is not None and (iftype == 'bridge' or iftype == 'vhostuser'):
e(interface, 'address', None, attrib={'type': nic.pci.type, 'domain': nic.pci.domain, 'bus': nic.pci.bus, 'slot': nic.pci.slot, "function": nic.pci.function})
else:
e(interface, 'address', None, attrib={'type': "pci"})
if nic.ips and iftype == 'bridge':
ip4Addr = None
ip6Addrs = []
for addr in nic.ips:
version = netaddr.IPAddress(addr).version
if version == 4:
ip4Addr = addr
else:
ip6Addrs.append(addr)
# ipv4 nic
if ip4Addr is not None and len(ip6Addrs) == 0:
filterref = e(interface, 'filterref', None, {'filter': 'clean-traffic'})
e(filterref, 'parameter', None, {'name': 'IP', 'value': ip4Addr})
elif ip4Addr is None and len(ip6Addrs) > 0: # ipv6 nic
filterref = e(interface, 'filterref', None, {'filter': 'zstack-clean-traffic-ipv6'})
for addr6 in ip6Addrs:
e(filterref, 'parameter', None, {'name': 'GLOBAL_IP', 'value': addr6})
e(filterref, 'parameter', None, {'name': 'LINK_LOCAL_IP', 'value': ip.get_link_local_address(nic.mac)})
else: # dual stack nic
filterref = e(interface, 'filterref', None, {'filter': 'zstack-clean-traffic-ip46'})
e(filterref, 'parameter', None, {'name': 'IP', 'value': ip4Addr})
for addr6 in ip6Addrs:
e(filterref, 'parameter', None, {'name': 'GLOBAL_IP', 'value': addr6})
e(filterref, 'parameter', None, {'name': 'LINK_LOCAL_IP', 'value': ip.get_link_local_address(nic.mac)})
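# Example nwfilter binding for a dual-stack nic (values illustrative):
#   <filterref filter='zstack-clean-traffic-ip46'>
#     <parameter name='IP' value='10.0.1.5'/>
#     <parameter name='GLOBAL_IP' value='2001:db8::5'/>
#     <parameter name='LINK_LOCAL_IP' value='fe80::1'/>
#   </filterref>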
if iftype != 'hostdev':
if nic.driverType:
e(interface, 'model', None, attrib={'type': nic.driverType})
elif nic.useVirtio:
e(interface, 'model', None, attrib={'type': 'virtio'})
else:
e(interface, 'model', None, attrib={'type': 'e1000'})
if nic.driverType == 'virtio' and nic.vHostAddOn.queueNum != 1:
e(interface, 'driver', None, attrib={'name': 'vhost', 'txmode': 'iothread', 'ioeventfd': 'on', 'event_idx': 'off', 'queues': str(nic.vHostAddOn.queueNum), 'rx_queue_size': str(nic.vHostAddOn.rxBufferSize) if nic.vHostAddOn.rxBufferSize is not None else '256', 'tx_queue_size': str(nic.vHostAddOn.txBufferSize) if nic.vHostAddOn.txBufferSize is not None else '256'})
if nic.bootOrder is not None and nic.bootOrder > 0:
e(interface, 'boot', None, attrib={'order': str(nic.bootOrder)})
@in_bash
@lock.file_lock('/run/xtables.lock')
def _config_ebtable_rules_for_vfnics():
VF_NIC_MAC = nic.mac
CHAIN_NAME = 'ZSTACK-VF-NICS'
EBTABLES_CMD = ebtables.get_ebtables_cmd()
if action == 'Attach':
if bash.bash_r(EBTABLES_CMD + ' -L {{CHAIN_NAME}} > /dev/null 2>&1') != 0:
bash.bash_r(EBTABLES_CMD + ' -N {{CHAIN_NAME}}')
if bash.bash_r(EBTABLES_CMD + ' -L FORWARD | grep -- "-j {{CHAIN_NAME}}" > /dev/null') != 0:
bash.bash_r(EBTABLES_CMD + ' -I FORWARD -j {{CHAIN_NAME}}')
if bash.bash_r(EBTABLES_CMD + ' -L {{CHAIN_NAME}} --Lmac2 | grep -- "-p IPv4 -s {{VF_NIC_MAC}} --ip-proto udp --ip-sport 67:68 -j ACCEPT" > /dev/null') != 0:
bash.bash_r(EBTABLES_CMD + ' -I {{CHAIN_NAME}} -p IPv4 -s {{VF_NIC_MAC}} --ip-proto udp --ip-sport 67:68 -j ACCEPT')
elif action == 'Detach':
# FIXME: when a vm is destroyed, no vnic detach function is called, leaving some garbage rules behind
if bash.bash_r(EBTABLES_CMD + ' -L {{CHAIN_NAME}} --Lmac2 | grep -- "-p IPv4 -s {{VF_NIC_MAC}} --ip-proto udp --ip-sport 67:68 -j ACCEPT" > /dev/null') == 0:
bash.bash_r(EBTABLES_CMD + ' -D {{CHAIN_NAME}} -p IPv4 -s {{VF_NIC_MAC}} --ip-proto udp --ip-sport 67:68 -j ACCEPT')
@in_bash
def _add_bridge_fdb_entry_for_vnic():
if action == 'Attach':
# if nic.physicalInterface is a bond, find the first slave PF that has SR-IOV VFs enabled
_phy_dev_name = nic.physicalInterface
_phy_dev_folder = os.path.join('/sys/class/net', _phy_dev_name)
for fname in os.listdir(_phy_dev_folder):
if fname.startswith('slave_'):
_slave_numvfs = os.path.join(_phy_dev_folder, fname, 'device/sriov_numvfs')
if os.path.isfile(_slave_numvfs):
with open(_slave_numvfs, 'r') as f:
if int(f.read().strip()) != 0:
_phy_dev_name = fname.replace('slave_', '').strip(' \t\n\r')
break
if not linux.bridge_fdb_has_self_rule(nic.mac, _phy_dev_name):
bash.bash_r("bridge fdb add %s dev %s" % (nic.mac, _phy_dev_name))
# to allow vf nic dhcp
if nic.pciDeviceAddress is not None:
_config_ebtable_rules_for_vfnics()
# to allow vnic/vf communication on the same host
if nic.pciDeviceAddress is None and nic.physicalInterface is not None and brMode != 'mocbr':
_add_bridge_fdb_entry_for_vnic()
return interface
@staticmethod
def _ignore_colo_vm_nic_rom_file_on_interface(interface):
e(interface, 'driver', None, attrib={'name': 'qemu'})
e(interface, 'rom', None, attrib={'file': ''})
@staticmethod
def _add_qos_to_interface(interface, qos):
if not qos.outboundBandwidth and not qos.inboundBandwidth:
return
bandwidth = e(interface, 'bandwidth')
if qos.outboundBandwidth:
e(bandwidth, 'outbound', None, {'average': str(qos.outboundBandwidth / 1024 / 8)})
if qos.inboundBandwidth:
e(bandwidth, 'inbound', None, {'average': str(qos.inboundBandwidth / 1024 / 8)})
def _stop_world():
http.AsyncUirHandler.STOP_WORLD = True
VmPlugin.queue_singleton.queue.put("exit")
@in_bash
def execute_qmp_command(domain_id, command):
return bash.bash_roe("virsh qemu-monitor-command %s '%s' --pretty" % (domain_id, command))
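# Usage sketch: 'command' is a raw QMP JSON string, e.g.
#   ret, out, err = execute_qmp_command(vm_uuid, '{"execute": "query-status"}')
# where 'out' holds the pretty-printed QMP reply on success.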
class VmPlugin(kvmagent.KvmAgent):
KVM_START_VM_PATH = "/vm/start"
KVM_STOP_VM_PATH = "/vm/stop"
KVM_PAUSE_VM_PATH = "/vm/pause"
KVM_RESUME_VM_PATH = "/vm/resume"
KVM_REBOOT_VM_PATH = "/vm/reboot"
KVM_DESTROY_VM_PATH = "/vm/destroy"
KVM_ONLINE_CHANGE_CPUMEM_PATH = "/vm/online/changecpumem"
KVM_ONLINE_INCREASE_CPU_PATH = "/vm/increase/cpu"
KVM_ONLINE_INCREASE_MEMORY_PATH = "/vm/increase/mem"
KVM_GET_CONSOLE_PORT_PATH = "/vm/getvncport"
KVM_VM_SYNC_PATH = "/vm/vmsync"
KVM_ATTACH_VOLUME = "/vm/attachdatavolume"
KVM_DETACH_VOLUME = "/vm/detachdatavolume"
KVM_MIGRATE_VM_PATH = "/vm/migrate"
KVM_BLOCK_LIVE_MIGRATION_PATH = "/vm/blklivemigration"
KVM_VM_CHECK_VOLUME_PATH = "/vm/volume/check"
KVM_TAKE_VOLUME_SNAPSHOT_PATH = "/vm/volume/takesnapshot"
KVM_TAKE_VOLUME_BACKUP_PATH = "/vm/volume/takebackup"
KVM_BLOCK_STREAM_VOLUME_PATH = "/vm/volume/blockstream"
KVM_TAKE_VOLUMES_SNAPSHOT_PATH = "/vm/volumes/takesnapshot"
KVM_TAKE_VOLUMES_BACKUP_PATH = "/vm/volumes/takebackup"
KVM_CANCEL_VOLUME_BACKUP_JOBS_PATH = "/vm/volume/cancel/backupjobs"
KVM_MERGE_SNAPSHOT_PATH = "/vm/volume/mergesnapshot"
KVM_LOGOUT_ISCSI_TARGET_PATH = "/iscsi/target/logout"
KVM_LOGIN_ISCSI_TARGET_PATH = "/iscsi/target/login"
KVM_ATTACH_NIC_PATH = "/vm/attachnic"
KVM_DETACH_NIC_PATH = "/vm/detachnic"
KVM_UPDATE_NIC_PATH = "/vm/updatenic"
KVM_CREATE_SECRET = "/vm/createcephsecret"
KVM_ATTACH_ISO_PATH = "/vm/iso/attach"
KVM_DETACH_ISO_PATH = "/vm/iso/detach"
KVM_VM_CHECK_STATE = "/vm/checkstate"
KVM_VM_CHANGE_PASSWORD_PATH = "/vm/changepasswd"
KVM_SET_VOLUME_BANDWIDTH = "/set/volume/bandwidth"
KVM_DELETE_VOLUME_BANDWIDTH = "/delete/volume/bandwidth"
KVM_GET_VOLUME_BANDWIDTH = "/get/volume/bandwidth"
KVM_SET_NIC_QOS = "/set/nic/qos"
KVM_GET_NIC_QOS = "/get/nic/qos"
KVM_HARDEN_CONSOLE_PATH = "/vm/console/harden"
KVM_DELETE_CONSOLE_FIREWALL_PATH = "/vm/console/deletefirewall"
HOT_PLUG_PCI_DEVICE = "/pcidevice/hotplug"
HOT_UNPLUG_PCI_DEVICE = "/pcidevice/hotunplug"
ATTACH_PCI_DEVICE_TO_HOST = "/pcidevice/attachtohost"
DETACH_PCI_DEVICE_FROM_HOST = "/pcidevice/detachfromhost"
KVM_ATTACH_USB_DEVICE_PATH = "/vm/usbdevice/attach"
KVM_DETACH_USB_DEVICE_PATH = "/vm/usbdevice/detach"
RELOAD_USB_REDIRECT_PATH = "/vm/usbdevice/reload"
CHECK_MOUNT_DOMAIN_PATH = "/check/mount/domain"
KVM_RESIZE_VOLUME_PATH = "/volume/resize"
VM_PRIORITY_PATH = "/vm/priority"
ATTACH_GUEST_TOOLS_ISO_TO_VM_PATH = "/vm/guesttools/attachiso"
DETACH_GUEST_TOOLS_ISO_FROM_VM_PATH = "/vm/guesttools/detachiso"
GET_VM_GUEST_TOOLS_INFO_PATH = "/vm/guesttools/getinfo"
KVM_GET_VM_FIRST_BOOT_DEVICE_PATH = "/vm/getfirstbootdevice"
KVM_CONFIG_PRIMARY_VM_PATH = "/primary/vm/config"
KVM_CONFIG_SECONDARY_VM_PATH = "/secondary/vm/config"
KVM_START_COLO_SYNC_PATH = "/start/colo/sync"
KVM_REGISTER_PRIMARY_VM_HEARTBEAT = "/register/primary/vm/heartbeat"
CHECK_COLO_VM_STATE_PATH = "/check/colo/vm/state"
WAIT_COLO_VM_READY_PATH = "/wait/colo/vm/ready"
ROLLBACK_QUORUM_CONFIG_PATH = "/rollback/quorum/config"
FAIL_COLO_PVM_PATH = "/fail/colo/pvm"
GET_VM_DEVICE_ADDRESS_PATH = "/vm/getdeviceaddress"
VM_OP_START = "start"
VM_OP_STOP = "stop"
VM_OP_REBOOT = "reboot"
VM_OP_MIGRATE = "migrate"
VM_OP_DESTROY = "destroy"
VM_OP_SUSPEND = "suspend"
VM_OP_RESUME = "resume"
timeout_object = linux.TimeoutObject()
queue_singleton = VmPluginQueueSingleton()
secret_keys = {}
vm_heartbeat = {}
if not os.path.exists(QMP_SOCKET_PATH):
os.mkdir(QMP_SOCKET_PATH)
def _record_operation(self, uuid, op):
j = VmOperationJudger(op)
self.timeout_object.put(uuid, j, 300)
def _remove_operation(self, uuid):
self.timeout_object.remove(uuid)
def _get_operation(self, uuid):
o = self.timeout_object.get(uuid)
if not o:
return None
return o[0]
def _prepare_ebtables_for_mocbr(self, cmd):
brMode = cmd.addons['brMode'] if cmd.addons else None
if brMode != 'mocbr':
return
l3mapping = cmd.addons['l3mapping'] if cmd.addons else None
if not l3mapping:
return
if not cmd.nics:
return
mappings = {} # mac -> l3uuid
for ele in l3mapping:
m = ele.split("-")
mappings[m[0]] = m[1]
EBTABLES_CMD = ebtables.get_ebtables_cmd()
for nic in cmd.nics:
ns = "{}_{}".format(nic.bridgeName, mappings[nic.mac])
outerdev = "outer%s" % ip.get_namespace_id(ns)
rule = " -t nat -A PREROUTING -i {} -d {} -j dnat --to-destination ff:ff:ff:ff:ff:ff".format(outerdev, nic.mac)
bash.bash_r(EBTABLES_CMD + rule)
bash.bash_r("ebtables-save | uniq | ebtables-restore")
def _start_vm(self, cmd):
try:
vm = get_vm_by_uuid_no_retry(cmd.vmInstanceUuid, False)
if vm:
if vm.state == Vm.VM_STATE_RUNNING:
# http://jira.zstack.io/browse/ZSTAC-26937
#raise kvmagent.KvmError(
# 'vm[uuid:%s, name:%s] is already running' % (cmd.vmInstanceUuid, vm.get_name()))
logger.debug('vm[uuid:%s, name:%s] is already running' % (cmd.vmInstanceUuid, vm.get_name()))
return
else:
vm.destroy()
vm = Vm.from_StartVmCmd(cmd)
if cmd.memorySnapshotPath:
vm.restore(cmd.memorySnapshotPath)
return
wait_console = not cmd.addons or cmd.addons['noConsole'] is not True
self._prepare_ebtables_for_mocbr(cmd)
vm.start(cmd.timeout, cmd.createPaused, wait_console)
except libvirt.libvirtError as e:
logger.warn(linux.get_exception_stacktrace())
# c.f. https://access.redhat.com/solutions/2735671
if "org.fedoraproject.FirewallD1 was not provided" in str(e.message):
_stop_world() # to trigger libvirtd restart
raise kvmagent.KvmError(
'unable to start vm[uuid:%s, name:%s], libvirt error: %s' % (
cmd.vmInstanceUuid, cmd.vmName, str(e)))
if "Device or resource busy" in str(e.message):
raise kvmagent.KvmError(
'unable to start vm[uuid:%s, name:%s], libvirt error: %s' % (
cmd.vmInstanceUuid, cmd.vmName, str(e)))
try:
vm = get_vm_by_uuid(cmd.vmInstanceUuid)
if vm and vm.state != Vm.VM_STATE_RUNNING:
raise kvmagent.KvmError(
'vm[uuid:%s, name:%s, state:%s] is not in running state, libvirt error: %s' % (
cmd.vmInstanceUuid, cmd.vmName, vm.state, str(e)))
except kvmagent.KvmError:
raise kvmagent.KvmError(
'unable to start vm[uuid:%s, name:%s], libvirt error: %s' % (cmd.vmInstanceUuid, cmd.vmName, str(e)))
def _cleanup_iptable_chains(self, chain, data):
if 'vnic' not in chain.name:
return False
vnic_name = chain.name.split('-')[0]
if vnic_name not in data:
logger.debug('clean up defunct vnic chain[%s]' % chain.name)
return True
return False
@kvmagent.replyerror
def attach_iso(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.vmUuid)
vm.attach_iso(cmd)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def detach_iso(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.vmUuid)
vm.detach_iso(cmd)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def attach_nic(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AttchNicResponse()
vm = get_vm_by_uuid(cmd.vmUuid)
vm.attach_nic(cmd)
for iface in vm.domain_xmlobject.devices.get_child_node_as_list('interface'):
if iface.mac.address_ == cmd.nic.mac:
rsp.pciAddress.bus = iface.address.bus_
rsp.pciAddress.function = iface.address.function_
rsp.pciAddress.type = iface.address.type_
rsp.pciAddress.domain = iface.address.domain_
rsp.pciAddress.slot = iface.address.slot_
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def detach_nic(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.vmUuid, False)
if not vm:
return jsonobject.dumps(rsp)
vm.detach_nic(cmd)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def update_nic(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.vmInstanceUuid)
vm.update_nic(cmd)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def start_vm(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = StartVmResponse()
try:
self._record_operation(cmd.vmInstanceUuid, self.VM_OP_START)
self._start_vm(cmd)
logger.debug('successfully started vm[uuid:%s, name:%s]' % (cmd.vmInstanceUuid, cmd.vmName))
try:
vm_pid = linux.find_vm_pid_by_uuid(cmd.vmInstanceUuid)
linux.enable_process_coredump(vm_pid)
linux.set_vm_priority(vm_pid, cmd.priorityConfigStruct)
except Exception as e:
logger.warn("enable coredump for VM: %s: %s" % (cmd.vmInstanceUuid, str(e)))
except kvmagent.KvmError as e:
e_str = linux.get_exception_stacktrace()
logger.warn(e_str)
if "burst" in e_str and "Illegal" in e_str and "rate" in e_str:
rsp.error = "QoS exceed max limit, please check and reset it in zstack"
elif "cannot set up guest memory" in e_str:
logger.warn('unable to start vm[uuid:%s], %s' % (cmd.vmInstanceUuid, e_str))
rsp.error = "No enough physical memory for guest"
else:
rsp.error = e_str
err = self.handle_vfio_irq_conflict(cmd.vmInstanceUuid)
if err != "":
rsp.error = "%s, details: %s" % (err, rsp.error)
rsp.success = False
return jsonobject.dumps(rsp)
def get_vm_stat_with_ps(self, uuid):
"""In case libvirtd is stopped or misbehaved"""
if not linux.find_vm_pid_by_uuid(uuid):
return Vm.VM_STATE_SHUTDOWN
return Vm.VM_STATE_RUNNING
@kvmagent.replyerror
def check_vm_state(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
states = get_all_vm_states()
rsp = CheckVmStateRsp()
for uuid in cmd.vmUuids:
s = states.get(uuid)
if not s:
s = self.get_vm_stat_with_ps(uuid)
rsp.states[uuid] = s
return jsonobject.dumps(rsp)
def _escape(self, size):
unit = size.strip().lower()[-1]
num = size.strip()[:-1]
units = {
"g": lambda x: x * 1024,
"m": lambda x: x,
"k": lambda x: x / 1024,
}
return int(units[unit](int(num)))
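# _escape normalizes qemu-img size strings to whole MB, e.g. '2g' -> 2048,
# '512m' -> 512, '2048k' -> 2; note the numeric part is parsed with int(),
# so fractional sizes like '1.5G' would raise ValueError.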
def _get_image_mb_size(self, image):
backing = shell.call('%s %s | grep "backing file:" | awk -F \'backing file:\' \'{print $2}\' ' %
(qemu_img.subcmd('info'), image)).strip()
size = shell.call('%s %s | grep "disk size:" | awk -F \'disk size:\' \'{print $2}\' ' %
(qemu_img.subcmd('info'), image)).strip()
if not backing:
return self._escape(size)
else:
return self._get_image_mb_size(backing) + self._escape(size)
def _get_volume_bandwidth_value(self, vm_uuid, device_id, mode):
cmd_base = "virsh blkdeviotune %s %s" % (vm_uuid, device_id)
if mode == "total":
return shell.call('%s | grep -w total_bytes_sec | awk \'{print $2}\'' % cmd_base).strip()
elif mode == "read":
return shell.call('%s | grep -w read_bytes_sec | awk \'{print $3}\'' % cmd_base).strip()
elif mode == "write":
return shell.call('%s | grep -w write_bytes_sec | awk \'{print $2}\'' % cmd_base).strip()
@kvmagent.replyerror
def set_volume_bandwidth(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.vmUuid)
_, device_id = vm._get_target_disk(cmd.volume)
## total and read/write of bytes_sec cannot be set at the same time
## http://confluence.zstack.io/pages/viewpage.action?pageId=42599772#comment-42600879
cmd_base = "virsh blkdeviotune %s %s" % (cmd.vmUuid, device_id)
if (cmd.mode == "total") or (cmd.mode is None): # to set total(read/write reset)
shell.call('%s --total_bytes_sec %s' % (cmd_base, cmd.totalBandwidth))
elif cmd.mode == "all":
shell.call('%s --read_bytes_sec %s --write_bytes_sec %s' % (cmd_base, cmd.readBandwidth, cmd.writeBandwidth))
elif cmd.mode == "read": # to set read(write reserved, total reset)
write_bytes_sec = self._get_volume_bandwidth_value(cmd.vmUuid, device_id, "write")
shell.call('%s --read_bytes_sec %s --write_bytes_sec %s' % (cmd_base, cmd.readBandwidth, write_bytes_sec))
elif cmd.mode == "write": # to set write(read reserved, total reset)
read_bytes_sec = self._get_volume_bandwidth_value(cmd.vmUuid, device_id, "read")
shell.call('%s --read_bytes_sec %s --write_bytes_sec %s' % (cmd_base, read_bytes_sec, cmd.writeBandwidth))
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def delete_volume_bandwidth(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.vmUuid)
_, device_id = vm._get_target_disk(cmd.volume)
## total and read/write of bytes_sec cannot be set at the same time
## http://confluence.zstack.io/pages/viewpage.action?pageId=42599772#comment-42600879
cmd_base = "virsh blkdeviotune %s %s" % (cmd.vmUuid, device_id)
is_total_mode = self._get_volume_bandwidth_value(cmd.vmUuid, device_id, "total") != "0"
if cmd.mode == "all": # to delete all(read/write reset)
shell.call('%s --total_bytes_sec 0' % (cmd_base))
elif (cmd.mode == "total") or (cmd.mode is None): # to delete total
if is_total_mode:
shell.call('%s --total_bytes_sec 0' % (cmd_base))
elif cmd.mode == "read": # to delete read(write reserved, total reset)
if not is_total_mode:
write_bytes_sec = self._get_volume_bandwidth_value(cmd.vmUuid, device_id, "write")
shell.call('%s --read_bytes_sec 0 --write_bytes_sec %s' % (cmd_base, write_bytes_sec))
elif cmd.mode == "write": # to delete write(read reserved, total reset)
if not is_total_mode:
read_bytes_sec = self._get_volume_bandwidth_value(cmd.vmUuid, device_id, "read")
shell.call('%s --read_bytes_sec %s --write_bytes_sec 0' % (cmd_base, read_bytes_sec))
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_volume_bandwidth(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.vmUuid)
_, device_id = vm._get_target_disk(cmd.volume)
cmd_base = "virsh blkdeviotune %s %s" % (cmd.vmUuid, device_id)
bandWidth = shell.call('%s | grep -w total_bytes_sec | awk \'{print $2}\'' % cmd_base).strip()
bandWidthRead = shell.call('%s | grep -w read_bytes_sec | awk \'{print $3}\'' % cmd_base).strip()
bandWidthWrite = shell.call('%s | grep -w write_bytes_sec | awk \'{print $2}\'' % cmd_base).strip()
rsp.bandWidth = bandWidth if long(bandWidth) > 0 else -1
rsp.bandWidthWrite = bandWidthWrite if long(bandWidthWrite) > 0 else -1
rsp.bandWidthRead = bandWidthRead if long(bandWidthRead) > 0 else -1
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def set_nic_qos(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
try:
if cmd.inboundBandwidth != -1:
shell.call('virsh domiftune %s %s --inbound %s' % (cmd.vmUuid, cmd.internalName, cmd.inboundBandwidth/1024/8))
if cmd.outboundBandwidth != -1:
shell.call('virsh domiftune %s %s --outbound %s' % (cmd.vmUuid, cmd.internalName, cmd.outboundBandwidth/1024/8))
except Exception as e:
e_str = linux.get_exception_stacktrace()
logger.warn(e_str)
if "burst" in e_str and "Illegal" in e_str and "rate" in e_str:
rsp.error = "QoS exceed the max limit, please check and reset it in zstack"
else:
rsp.error = e_str
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_nic_qos(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
inbound = shell.call('virsh domiftune %s %s | grep "inbound.average:"|awk \'{print $2}\'' % (cmd.vmUuid, cmd.internalName)).strip()
outbound = shell.call('virsh domiftune %s %s | grep "outbound.average:"|awk \'{print $2}\'' % (cmd.vmUuid, cmd.internalName)).strip()
rsp.inbound = long(inbound) * 8 * 1024 if long(inbound) > 0 else -1
rsp.outbound = long(outbound) * 8 * 1024 if long(outbound) > 0 else -1
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def check_mount_domain(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CheckMountDomainRsp()
finish_time = time.time() + (cmd.timeout / 1000)
while time.time() < finish_time:
try:
logger.debug("check mount url: %s" % cmd.url)
linux.is_valid_nfs_url(cmd.url)
rsp.active = True
return jsonobject.dumps(rsp)
except Exception as err:
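# the misspelled 'cannont' below is matched verbatim against err.message
# (presumably raised with the same spelling by linux.is_valid_nfs_url),
# so it must not be "fixed" here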
if 'cannont resolve to ip address' in err.message:
logger.warn(err.message)
logger.warn('wait 1 second')
else:
raise err
time.sleep(1)
rsp.active = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def change_vm_password(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = ChangeVmPasswordRsp()
vm = get_vm_by_uuid(cmd.accountPerference.vmUuid, False)
try:
if not vm:
raise kvmagent.KvmError('vm is not in running state.')
else:
vm.change_vm_password(cmd)
except kvmagent.KvmError as e:
rsp.error = str(e)
rsp.success = False
rsp.accountPerference = cmd.accountPerference
rsp.accountPerference.accountPassword = "******"
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def harden_console(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.vmUuid)
vm.harden_console(cmd.hostManagementIp)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def vm_sync(self, req):
rsp = VmSyncResponse()
rsp.states, rsp.vmInShutdowns = get_all_vm_sync_states()
# In case of a reboot inside the VM. Note that ZS will only define transient VMs.
retry_for_paused = []
for uuid in rsp.states:
if rsp.states[uuid] == Vm.VM_STATE_SHUTDOWN:
rsp.states[uuid] = Vm.VM_STATE_RUNNING
elif rsp.states[uuid] == Vm.VM_STATE_PAUSED:
retry_for_paused.append(uuid)
# Occasionally, virsh might not be able to list all VM instances with
# uri=qemu://system. To handle this situation, we double-check
# 'rsp.states' against the QEMU process list.
output = bash.bash_o("ps x | grep -P -o 'qemu-kvm.*?-name\s+(guest=)?\K.*?,' | sed 's/.$//'").splitlines()
for guest in output:
if guest in rsp.states \
or guest.lower() == "ZStack Management Node VM".lower()\
or guest.startswith("guestfs-"):
continue
logger.warn('guest [%s] not found in virsh list' % guest)
rsp.states[guest] = Vm.VM_STATE_RUNNING
time.sleep(0.5)
if len(retry_for_paused) > 0:
states, in_shutdown = get_all_vm_sync_states()
for uuid in states:
if states[uuid] == Vm.VM_STATE_SHUTDOWN:
rsp.states[uuid] = Vm.VM_STATE_RUNNING
elif states[uuid] != Vm.VM_STATE_PAUSED:
rsp.states[uuid] = states[uuid]
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def online_increase_mem(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = IncreaseMemoryResponse()
try:
vm = get_vm_by_uuid(cmd.vmUuid)
memory_size = cmd.memorySize
vm.hotplug_mem(memory_size)
vm = get_vm_by_uuid(cmd.vmUuid)
rsp.memorySize = vm.get_memory()
logger.debug('successfully increase memory of vm[uuid:%s] to %s Kib' % (cmd.vmUuid, vm.get_memory()))
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def online_increase_cpu(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = IncreaseCpuResponse()
try:
vm = get_vm_by_uuid(cmd.vmUuid)
cpu_num = cmd.cpuNum
vm.hotplug_cpu(cpu_num)
vm = get_vm_by_uuid(cmd.vmUuid)
rsp.cpuNum = vm.get_cpu_num()
logger.debug('successfully increase cpu number of vm[uuid:%s] to %s' % (cmd.vmUuid, vm.get_cpu_num()))
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def online_change_cpumem(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = ChangeCpuMemResponse()
try:
vm = get_vm_by_uuid(cmd.vmUuid)
cpu_num = cmd.cpuNum
memory_size = cmd.memorySize
vm.hotplug_mem(memory_size)
vm.hotplug_cpu(cpu_num)
vm = get_vm_by_uuid(cmd.vmUuid)
rsp.cpuNum = vm.get_cpu_num()
rsp.memorySize = vm.get_memory()
logger.debug('successfully add cpu and memory on vm[uuid:%s]' % (cmd.vmUuid))
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
def get_vm_console_info(self, vmUuid):
try:
vm = get_vm_by_uuid(vmUuid)
protocol, vncPort, spicePort, spiceTlsPort = vm.get_vdi_connect_port()
ret = check_vdi_port(vncPort, spicePort, spiceTlsPort)
if ret is True:
return protocol, vncPort, spicePort, spiceTlsPort
# Occasionally, 'virsh list' would list nothing but conn.lookupByName()
# can find the VM and dom.XMLDesc(0) will return VNC port '-1'.
err = 'libvirt failed to get console port for VM %s' % vmUuid
logger.warn(err)
raise kvmagent.KvmError(err)
except kvmagent.KvmError as e:
protocol, vncPort, spicePort, spiceTlsPort = get_console_without_libvirt(vmUuid)
ret = check_vdi_port(vncPort, spicePort, spiceTlsPort)
if ret is True:
return protocol, vncPort, spicePort, spiceTlsPort
raise e
@kvmagent.replyerror
def get_console_port(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetVncPortResponse()
try:
protocol, vncPort, spicePort, spiceTlsPort = self.get_vm_console_info(cmd.vmUuid)
rsp.protocol = protocol
rsp.vncPort = vncPort
rsp.spicePort = spicePort
rsp.spiceTlsPort = spiceTlsPort
if vncPort is not None:
rsp.port = vncPort
else:
rsp.port = spicePort
logger.debug('successfully get vncPort[%s], spicePort[%s], spiceTlsPort[%s] of vm[uuid:%s]' % (
vncPort, spicePort, spiceTlsPort, cmd.vmUuid))
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
def _stop_vm(self, cmd):
try:
vm = get_vm_by_uuid(cmd.uuid)
strategy = str(cmd.type)
if strategy == "cold" or strategy == "force":
vm.stop(strategy=strategy)
else:
vm.stop(timeout=cmd.timeout / 2)
except kvmagent.KvmError as e:
logger.debug(linux.get_exception_stacktrace())
finally:
# libvirt is not reliable, c.f. ZSTAC-15412
self.kill_vm(cmd.uuid)
def kill_vm(self, vm_uuid):
output = bash.bash_o("ps x | grep -P -o 'qemu-kvm.*?-name\s+(guest=)?\K%s,' | sed 's/.$//'" % vm_uuid)
if vm_uuid not in output:
return
logger.debug('killing vm %s' % vm_uuid)
vm_pid = linux.find_vm_pid_by_uuid(vm_uuid)
if vm_pid:
linux.kill_process(vm_pid)
@kvmagent.replyerror
def stop_vm(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = StopVmResponse()
try:
self._record_operation(cmd.uuid, self.VM_OP_STOP)
self._stop_vm(cmd)
logger.debug("successfully stopped vm[uuid:%s]" % cmd.uuid)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def pause_vm(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
try:
rsp = PauseVmResponse()
self._record_operation(cmd.uuid, self.VM_OP_SUSPEND)
vm = get_vm_by_uuid(cmd.uuid)
vm.pause()
logger.debug('successfully paused vm[uuid:%s]' % cmd.uuid)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def resume_vm(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = ResumeVmResponse()
        try:
            self._record_operation(cmd.uuid, self.VM_OP_RESUME)
            vm = get_vm_by_uuid(cmd.uuid)
            vm.resume()
            logger.debug('successfully resumed vm[uuid:%s]' % cmd.uuid)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def reboot_vm(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = RebootVmResponse()
try:
self._record_operation(cmd.uuid, self.VM_OP_REBOOT)
vm = get_vm_by_uuid(cmd.uuid)
vm.reboot(cmd)
            logger.debug('successfully rebooted vm[uuid:%s]' % cmd.uuid)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def destroy_vm(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = DestroyVmResponse()
try:
self._record_operation(cmd.uuid, self.VM_OP_DESTROY)
vm = get_vm_by_uuid(cmd.uuid, False)
if vm:
vm.destroy()
logger.debug('successfully destroyed vm[uuid:%s]' % cmd.uuid)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def attach_data_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AttachDataVolumeResponse()
try:
volume = cmd.volume
vm = get_vm_by_uuid(cmd.vmInstanceUuid)
if vm.state != Vm.VM_STATE_RUNNING and vm.state != Vm.VM_STATE_PAUSED:
raise kvmagent.KvmError(
'unable to attach volume[%s] to vm[uuid:%s], vm must be running or paused' % (volume.installPath, vm.uuid))
vm.attach_data_volume(cmd.volume, cmd.addons)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
touchQmpSocketWhenExists(cmd.vmInstanceUuid)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def detach_data_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = DetachDataVolumeResponse()
try:
volume = cmd.volume
vm = get_vm_by_uuid(cmd.vmInstanceUuid)
if vm.state != Vm.VM_STATE_RUNNING and vm.state != Vm.VM_STATE_PAUSED:
raise kvmagent.KvmError(
                    'unable to detach volume[%s] from vm[uuid:%s], vm must be running or paused' % (volume.installPath, vm.uuid))
vm.detach_data_volume(volume)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def migrate_vm(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = MigrateVmResponse()
try:
self._record_operation(cmd.vmUuid, self.VM_OP_MIGRATE)
if cmd.migrateFromDestination:
with contextlib.closing(get_connect(cmd.srcHostIp)) as conn:
vm = get_vm_by_uuid(cmd.vmUuid, False, conn)
if vm is None:
logger.warn('unable to find vm {0} on host {1}'.format(cmd.vmUuid, cmd.srcHostIp))
raise kvmagent.KvmError('unable to find vm %s on host %s' % (cmd.vmUuid, cmd.srcHostIp))
vm.migrate(cmd)
else:
vm = get_vm_by_uuid(cmd.vmUuid)
vm.migrate(cmd)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def check_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid(cmd.uuid)
for volume in cmd.volumes:
vm._get_target_disk(volume)
return jsonobject.dumps(rsp)
def _get_new_disk(self, oldDisk, volume):
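        """Build the <disk> element for the destination volume of a block
        migration. Identity tags (target, boot, alias, address, wwn,
        serial) are copied over from the old disk so the guest keeps
        seeing the same device after migration.
        """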
def filebased_volume(_v):
disk = etree.Element('disk', {'type': 'file', 'device': 'disk', 'snapshot': 'external'})
e(disk, 'driver', None, {'name': 'qemu', 'type': 'qcow2', 'cache': _v.cacheMode})
e(disk, 'source', None, {'file': _v.installPath})
return disk
def ceph_volume(_v):
def ceph_virtio():
vc = VirtioCeph()
vc.volume = _v
return vc.to_xmlobject()
def ceph_blk():
ic = BlkCeph()
ic.volume = _v
return ic.to_xmlobject()
def ceph_virtio_scsi():
vsc = VirtioSCSICeph()
vsc.volume = _v
return vsc.to_xmlobject()
if _v.useVirtioSCSI:
disk = ceph_virtio_scsi()
if _v.shareable:
e(disk, 'shareable')
return disk
if _v.useVirtio:
return ceph_virtio()
else:
return ceph_blk()
def block_volume(_v):
disk = etree.Element('disk', {'type': 'block', 'device': 'disk', 'snapshot': 'external'})
e(disk, 'driver', None,
{'name': 'qemu', 'type': 'raw', 'cache': 'none', 'io': 'native'})
e(disk, 'source', None, {'dev': _v.installPath})
return disk
if volume.deviceType == 'file':
ele = filebased_volume(volume)
elif volume.deviceType == 'ceph':
ele = ceph_volume(volume)
elif volume.deviceType == 'block':
ele = block_volume(volume)
else:
raise Exception('unsupported volume deviceType[%s]' % volume.deviceType)
tags_to_keep = [ 'target', 'boot', 'alias', 'address', 'wwn', 'serial']
for c in oldDisk.getchildren():
if c.tag in tags_to_keep:
child = ele.find(c.tag)
if child is not None: ele.remove(child)
ele.append(c)
logger.info("updated disk XML: " + etree.tostring(ele))
return ele
def _build_domain_new_xml(self, vm, volumeDicts):
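        """Rewrite the domain XML for block migration.

        Each <disk> whose target dev appears in volumeDicts is replaced,
        at the same position under <devices>, by the element built in
        _get_new_disk(). Returns (names of disks to migrate, path of the
        temporary XML file); the caller is expected to delete the file.
        """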
migrate_disks = {}
for oldpath, volume in volumeDicts.items():
_, disk_name = vm._get_target_disk_by_path(oldpath)
migrate_disks[disk_name] = volume
fd, fpath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as tmpf:
tmpf.write(vm.domain_xml)
tree = etree.parse(fpath)
devices = tree.getroot().find('devices')
for disk in tree.iterfind('devices/disk'):
dev = disk.find('target').attrib['dev']
if dev in migrate_disks:
new_disk = self._get_new_disk(disk, migrate_disks[dev])
parent_index = list(devices).index(disk)
devices.remove(disk)
devices.insert(parent_index, new_disk)
tree.write(fpath)
return migrate_disks.keys(), fpath
def _do_block_migration(self, vmUuid, dstHostIp, volumeDicts):
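        """Live block migration via 'virsh migrate --live --p2p
        --copy-storage-all' with the rewritten domain XML. On libvirt >= 4,
        '--unsafe' (which skips libvirt's migration safety checks) is
        added when any disk source is a /dev/ block device.
        """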
vm = get_vm_by_uuid(vmUuid)
disks, fpath = self._build_domain_new_xml(vm, volumeDicts)
dst = 'qemu+tcp://{0}/system'.format(dstHostIp)
migurl = 'tcp://{0}'.format(dstHostIp)
diskstr = ','.join(disks)
flags = "--live --p2p --copy-storage-all"
if LIBVIRT_MAJOR_VERSION >= 4:
if any(s.startswith('/dev/') for s in vm.list_blk_sources()):
flags += " --unsafe"
cmd = "virsh migrate {} --migrate-disks {} --xml {} {} {} {}".format(flags, diskstr, fpath, vmUuid, dst, migurl)
try:
shell.check_run(cmd)
finally:
os.remove(fpath)
@kvmagent.replyerror
def block_migrate_vm(self, req):
rsp = kvmagent.AgentResponse()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
self._record_operation(cmd.vmUuid, self.VM_OP_MIGRATE)
self._do_block_migration(cmd.vmUuid, cmd.destHostIp, cmd.disks.__dict__)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_snapshot_to_volume(self, req):
rsp = MergeSnapshotRsp()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
vm = get_vm_by_uuid(cmd.vmUuid, exception_if_not_existing=True)
if vm.state != vm.VM_STATE_RUNNING:
rsp.error = 'vm[uuid:%s] is not running, cannot do live snapshot chain merge' % vm.uuid
rsp.success = False
return jsonobject.dumps(rsp)
vm.merge_snapshot(cmd)
return jsonobject.dumps(rsp)
@staticmethod
def _get_snapshot_size(install_path):
size = linux.get_local_file_disk_usage(install_path)
if size is None or size == 0:
if install_path.startswith("/dev/"):
size = int(lvm.get_lv_size(install_path))
else:
size = linux.qcow2_virtualsize(install_path)
return size
@kvmagent.replyerror
def take_volumes_snapshots(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY]) # type: TakeSnapshotsCmd
rsp = TakeSnapshotsResponse() # type: TakeSnapshotsResponse
for snapshot_job in cmd.snapshotJobs:
if snapshot_job.vmInstanceUuid != cmd.snapshotJobs[0].vmInstanceUuid:
raise kvmagent.KvmError("can not take snapshot on multiple vms[%s and %s]" %
snapshot_job.vmInstanceUuid, cmd.snapshotJobs[0].vmInstanceUuid)
if snapshot_job.live != cmd.snapshotJobs[0].live:
raise kvmagent.KvmError("can not take snapshot on different live status")
Vm.ensure_no_internal_snapshot(snapshot_job.volume.installPath)
def makedir_if_need(new_path):
dirname = os.path.dirname(new_path)
if not os.path.exists(dirname):
os.makedirs(dirname, 0o755)
def get_size(install_path):
"""
:rtype: long
"""
return VmPlugin._get_snapshot_size(install_path)
def take_full_snapshot_by_qemu_img_convert(previous_install_path, install_path, new_volume_install_path):
"""
:rtype: (str, str, long)
"""
makedir_if_need(install_path)
linux.create_template(previous_install_path, install_path)
new_volume_path = new_volume_install_path if new_volume_install_path is not None else os.path.join(os.path.dirname(install_path), '{0}.qcow2'.format(uuidhelper.uuid()))
makedir_if_need(new_volume_path)
linux.qcow2_clone_with_cmd(install_path, new_volume_path, cmd)
return install_path, new_volume_path, get_size(install_path)
def take_delta_snapshot_by_qemu_img_convert(previous_install_path, install_path, new_volume_install_path):
"""
:rtype: (str, str, long)
"""
new_volume_path = new_volume_install_path if new_volume_install_path is not None else os.path.join(os.path.dirname(install_path), '{0}.qcow2'.format(uuidhelper.uuid()))
makedir_if_need(new_volume_path)
linux.qcow2_clone_with_cmd(previous_install_path, new_volume_path, cmd)
return previous_install_path, new_volume_path, get_size(install_path)
vm = get_vm_by_uuid(cmd.snapshotJobs[0].vmInstanceUuid, exception_if_not_existing=False)
try:
if vm and vm.state not in vm.ALLOW_SNAPSHOT_STATE:
raise kvmagent.KvmError(
'unable to take snapshot on vm[uuid:{0}] volume[id:{1}], '
'because vm is not in [{2}], current state is {3}'.format(
vm.uuid, cmd.snapshotJobs[0].deviceId, vm.ALLOW_SNAPSHOT_STATE, vm.state))
if vm and (vm.state == vm.VM_STATE_RUNNING or vm.state == vm.VM_STATE_PAUSED):
rsp.snapshots = vm.take_live_volumes_delta_snapshots(cmd.snapshotJobs)
else:
if vm and cmd.snapshotJobs[0].live is True:
raise kvmagent.KvmError("expected live snapshot but vm[%s] state is %s" %
vm.uuid, vm.state)
elif not vm and cmd.snapshotJobs[0].live is True:
raise kvmagent.KvmError("expected live snapshot but can not find vm[%s]" %
cmd.snapshotJobs[0].vmInstanceUuid)
for snapshot_job in cmd.snapshotJobs:
if snapshot_job.full:
rsp.snapshots.append(VolumeSnapshotResultStruct(
snapshot_job.volumeUuid, *take_full_snapshot_by_qemu_img_convert(
snapshot_job.previousInstallPath, snapshot_job.installPath, snapshot_job.newVolumeInstallPath)))
else:
rsp.snapshots.append(VolumeSnapshotResultStruct(
snapshot_job.volumeUuid, *take_delta_snapshot_by_qemu_img_convert(
snapshot_job.previousInstallPath, snapshot_job.installPath, snapshot_job.newVolumeInstallPath)))
except kvmagent.KvmError as error:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(error)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def take_volume_snapshot(self, req):
""" Take snapshot for a volume
        :param req: The request obj, example of req.body::
{
'vmUuid': '0dc62031-678d-4040-95e4-64fb217a2669',
'volumeUuid': '2e9fd964-ba33-4214-aaad-c6e16b9ae72b',
'volume': {
},
'installPath': '',
'volumeInstallPath': '',
'newVolumeUuid': '',
'newVolumeInstallPath': '',
'fullSnapshot': False,
'isBaremetal2InstanceOnlineSnapshot': False
}
"""
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = TakeSnapshotResponse()
def makedir_if_need(new_path):
dirname = os.path.dirname(new_path)
if not os.path.exists(dirname):
                os.makedirs(dirname, 0o755)
def take_full_snapshot_by_qemu_img_convert(previous_install_path, install_path):
makedir_if_need(install_path)
linux.create_template(previous_install_path, install_path)
new_volume_path = cmd.newVolumeInstallPath if cmd.newVolumeInstallPath is not None else os.path.join(os.path.dirname(install_path), '{0}.qcow2'.format(uuidhelper.uuid()))
makedir_if_need(new_volume_path)
linux.qcow2_clone_with_cmd(install_path, new_volume_path, cmd)
return install_path, new_volume_path
def take_delta_snapshot_by_qemu_img_convert(previous_install_path, install_path):
new_volume_path = cmd.newVolumeInstallPath if cmd.newVolumeInstallPath is not None else os.path.join(os.path.dirname(install_path), '{0}.qcow2'.format(uuidhelper.uuid()))
makedir_if_need(new_volume_path)
linux.qcow2_clone_with_cmd(previous_install_path, new_volume_path, cmd)
return previous_install_path, new_volume_path
try:
Vm.ensure_no_internal_snapshot(cmd.volumeInstallPath)
if not cmd.vmUuid:
if cmd.fullSnapshot:
rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_full_snapshot_by_qemu_img_convert(
cmd.volumeInstallPath, cmd.installPath)
else:
rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_delta_snapshot_by_qemu_img_convert(
cmd.volumeInstallPath, cmd.installPath)
else:
                # New param in cmd: a flag indicating this is a baremetal2
                # instance and the instance is currently online
if cmd.isBaremetal2InstanceOnlineSnapshot:
with bm_utils.NamedLock(name='baremetal_v2_volume_operator'):
src_vol_driver, dst_vol_driver = BmV2GwAgent.pre_take_volume_snapshot(cmd)
try:
rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_delta_snapshot_by_qemu_img_convert(
cmd.volumeInstallPath, cmd.installPath)
BmV2GwAgent.post_take_volume_snapshot(src_vol_driver, dst_vol_driver)
except Exception as e:
# Try to rollback the snapshot action
# BmV2GwAgent.resume_device(src_vol_driver)
BmV2GwAgent.rollback_volume_snapshot(
src_vol_driver, dst_vol_driver)
logger.error(traceback.format_exc())
raise e
else:
vm = get_vm_by_uuid(cmd.vmUuid, exception_if_not_existing=False)
if vm and vm.state != vm.VM_STATE_RUNNING and vm.state != vm.VM_STATE_SHUTDOWN and vm.state != vm.VM_STATE_PAUSED:
raise kvmagent.KvmError(
'unable to take snapshot on vm[uuid:{0}] volume[id:{1}], because vm is not Running, Stopped or Paused, current state is {2}'.format(
vm.uuid, cmd.volume.deviceId, vm.state))
if vm and (vm.state == vm.VM_STATE_RUNNING or vm.state == vm.VM_STATE_PAUSED):
rsp.snapshotInstallPath, rsp.newVolumeInstallPath = vm.take_volume_snapshot(cmd.volume,
cmd.installPath,
cmd.fullSnapshot)
else:
if cmd.fullSnapshot:
rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_full_snapshot_by_qemu_img_convert(
cmd.volumeInstallPath, cmd.installPath)
else:
rsp.snapshotInstallPath, rsp.newVolumeInstallPath = take_delta_snapshot_by_qemu_img_convert(
cmd.volumeInstallPath, cmd.installPath)
if cmd.fullSnapshot:
logger.debug(
                    'took full snapshot on vm[uuid:{0}] volume[id:{1}], snapshot path:{2}, new volume path:{3}'.format(
cmd.vmUuid, cmd.volume.deviceId, rsp.snapshotInstallPath, rsp.newVolumeInstallPath))
else:
logger.debug(
                    'took delta snapshot on vm[uuid:{0}] volume[id:{1}], snapshot path:{2}, new volume path:{3}'.format(
cmd.vmUuid, cmd.volume.deviceId, rsp.snapshotInstallPath, rsp.newVolumeInstallPath))
linux.sync_file(rsp.snapshotInstallPath)
rsp.size = VmPlugin._get_snapshot_size(rsp.snapshotInstallPath)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
if not cmd.isBaremetal2InstanceOnlineSnapshot:
touchQmpSocketWhenExists(cmd.vmUuid)
return jsonobject.dumps(rsp)
def push_backing_files(self, isc, hostname, drivertype, source):
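        """Upload the qcow2 backing file of a disk to the image store and
        return the resulting image metadata, or None when the disk is not
        qcow2 or has no backing file.
        """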
if drivertype != 'qcow2':
return None
bf = linux.qcow2_get_backing_file(source.file_)
if bf:
imf = isc.upload_image(hostname, bf)
return imf
return None
def do_cancel_backup_jobs(self, cmd):
isc = ImageStoreClient()
isc.stop_backup_jobs(cmd.vmUuid)
# returns list[VolumeBackupInfo]
def do_take_volumes_backup(self, cmd, target_disks, bitmaps, dstdir):
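        """Back up several volumes of one vm through the image store client.

        Per deviceId a tuple (bitmap, mode, nodename, speed) is computed:
        an existing bitmap yields mode 'auto' (or 'full' when explicitly
        requested); otherwise a fresh 'zsbitmap<deviceId>' is used with
        mode 'full', or 'top' when a qcow2 backing file was pushed to the
        image store first.
        """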
isc = ImageStoreClient()
backupArgs = {}
parents = {}
speed = 0
if cmd.volumeWriteBandwidth:
speed = cmd.volumeWriteBandwidth
device_ids = [volume.deviceId for volume in cmd.volumes]
for deviceId in device_ids:
target_disk = target_disks[deviceId]
drivertype = target_disk.driver.type_
nodename = self.get_backup_device_name(target_disk)
source = target_disk.source
bitmap = bitmaps[deviceId]
def get_backup_args():
if bitmap:
return bitmap, 'full' if cmd.mode == 'full' else 'auto', nodename, speed
bm = 'zsbitmap%d' % deviceId
if cmd.mode == 'full':
return bm, 'full', nodename, speed
imf = self.push_backing_files(isc, cmd.hostname, drivertype, source)
if not imf:
return bm, 'full', nodename, speed
parent = isc._build_install_path(imf.name, imf.id)
parents[deviceId] = parent
return bm, 'top', nodename, speed
backupArgs[deviceId] = get_backup_args()
logger.info('taking backup for vm: %s' % cmd.vmUuid)
res = isc.backup_volumes(cmd.vmUuid, backupArgs.values(), dstdir, Report.from_spec(cmd, "VmBackup"), get_task_stage(cmd))
logger.info('completed backup for vm: %s' % cmd.vmUuid)
backres = jsonobject.loads(res)
bkinfos = []
for deviceId in device_ids:
nodename = backupArgs[deviceId][2]
nodebak = backres[nodename]
installPath = None
if nodebak.mode == 'incremental':
installPath = self.getLastBackup(deviceId, cmd.backupInfos)
else:
installPath = parents.get(deviceId)
info = VolumeBackupInfo(deviceId,
backupArgs[deviceId][0],
nodebak.backupFile,
installPath)
if nodebak.mode == 'top' and info.parentInstallPath is None:
target_disk = target_disks[deviceId]
drivertype = target_disk.driver.type_
source = target_disk.source
imf = self.push_backing_files(isc, cmd.hostname, drivertype, source)
if imf:
parent = isc._build_install_path(imf.name, imf.id)
info.parentInstallPath = parent
bkinfos.append(info)
return bkinfos
# returns tuple: (bitmap, parent)
def do_take_volume_backup(self, cmd, drivertype, nodename, source, dest):
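        """Back up a single volume through the image store client.

        Mode selection: 'auto' when cmd.bitmap already exists, otherwise
        'full', or 'top' when the qcow2 top overlay has a backing file
        that is uploaded to the image store as the parent image first.
        """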
isc = ImageStoreClient()
bitmap = None
parent = None
mode = None
topoverlay = None
speed = 0
if drivertype == 'qcow2':
topoverlay = source.file_
def get_parent_bitmap_mode():
if cmd.bitmap:
return None, cmd.bitmap, 'full' if cmd.mode == 'full' else 'auto'
bitmap = 'zsbitmap%d' % (cmd.volume.deviceId)
if drivertype != 'qcow2':
return None, bitmap, 'full'
if cmd.mode == 'full':
return None, bitmap, 'full'
bf = linux.qcow2_get_backing_file(topoverlay)
if not bf:
return None, bitmap, 'full'
imf = isc.upload_image(cmd.hostname, bf)
parent = isc._build_install_path(imf.name, imf.id)
return parent, bitmap, 'top'
parent, bitmap, mode = get_parent_bitmap_mode()
if cmd.volumeWriteBandwidth:
speed = cmd.volumeWriteBandwidth
mode = isc.backup_volume(cmd.vmUuid, nodename, bitmap, mode, dest, speed, Report.from_spec(cmd, "VolumeBackup"), get_task_stage(cmd))
logger.info('finished backup volume with mode: %s' % mode)
if mode == 'incremental':
return bitmap, cmd.lastBackup
        if mode == 'top' and parent is None and topoverlay is not None:
bf = linux.qcow2_get_backing_file(topoverlay)
imf = isc.upload_image(cmd.hostname, bf)
parent = isc._build_install_path(imf.name, imf.id)
return bitmap, parent
@staticmethod
def get_backup_device_name(disk):
return ('' if disk.type_ == 'quorum' else 'drive-') + disk.alias.name_
def getLastBackup(self, deviceId, backupInfos):
for info in backupInfos:
if info.deviceId == deviceId:
return info.lastBackup
return None
def getBitmap(self, deviceId, backupInfos):
for info in backupInfos:
if info.deviceId == deviceId:
return info.bitmap
return None
@kvmagent.replyerror
def cancel_backup_jobs(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = TakeVolumesBackupsResponse()
try:
vm = get_vm_by_uuid(cmd.vmUuid, exception_if_not_existing=False)
if not vm:
raise kvmagent.KvmError("vm[uuid: %s] not found by libvirt" % cmd.vmUuid)
self.do_cancel_backup_jobs(cmd)
except kvmagent.KvmError as e:
logger.warn("cancel vm[uuid:%s] backup failed: %s" % (cmd.vmUuid, str(e)))
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def take_volumes_backups(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = TakeVolumesBackupsResponse()
vm = get_vm_by_uuid(cmd.vmUuid, exception_if_not_existing=False)
if not vm:
raise kvmagent.KvmError("vm[uuid: %s] not found by libvirt" % cmd.vmUuid)
storage = RemoteStorageFactory.get_remote_storage(cmd)
try:
storage.mount()
target_disks = {}
for volume in cmd.volumes:
target_disk, _ = vm._get_target_disk(volume)
target_disks[volume.deviceId] = target_disk
bitmaps = {}
device_ids = [volume.deviceId for volume in cmd.volumes]
for deviceId in device_ids:
bitmap = self.getBitmap(deviceId, cmd.backupInfos)
bitmaps[deviceId] = bitmap
res = self.do_take_volumes_backup(cmd,
target_disks,
bitmaps,
storage.local_work_dir)
for r in res:
r.backupFile = os.path.join(cmd.uploadDir, r.backupFile)
rsp.backupInfos = res
except Exception as e:
content = traceback.format_exc()
logger.warn("take vm[uuid:%s] backup failed: %s\n%s" % (cmd.vmUuid, str(e), content))
rsp.error = str(e)
rsp.success = False
finally:
storage.umount()
storage.clean()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def take_volume_backup(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = TakeVolumeBackupResponse()
vm = get_vm_by_uuid(cmd.vmUuid, exception_if_not_existing=False)
if not vm:
raise kvmagent.KvmError("vm[uuid: %s] not found by libvirt" % cmd.vmUuid)
storage = RemoteStorageFactory.get_remote_storage(cmd)
fname = uuidhelper.uuid()+".qcow2"
try:
storage.mount()
target_disk, _ = vm._get_target_disk(cmd.volume)
bitmap, parent = self.do_take_volume_backup(cmd,
target_disk.driver.type_, # 'qcow2' etc.
self.get_backup_device_name(target_disk), # 'virtio-disk0' etc.
target_disk.source,
os.path.join(storage.local_work_dir, fname))
logger.info('finished backup volume with parent: %s' % parent)
rsp.bitmap = bitmap
rsp.parentInstallPath = parent
rsp.backupFile = os.path.join(cmd.uploadDir, fname)
except Exception as e:
content = traceback.format_exc()
logger.warn("take volume backup failed: " + str(e) + '\n' + content)
rsp.error = str(e)
rsp.success = False
finally:
storage.umount()
storage.clean()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def block_stream(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = BlockStreamResponse()
if not cmd.vmUuid:
rsp.success = True
return jsonobject.dumps(rsp)
vm = get_vm_by_uuid(cmd.vmUuid, exception_if_not_existing=False)
if not vm:
rsp.success = True
return jsonobject.dumps(rsp)
vm.block_stream_disk(cmd.volume)
rsp.success = True
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@lock.lock('iscsiadm')
def logout_iscsi_target(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
shell.call(
'iscsiadm -m node --targetname "%s" --portal "%s:%s" --logout' % (cmd.target, cmd.hostname, cmd.port))
rsp = LogoutIscsiTargetRsp()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def login_iscsi_target(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
login = IscsiLogin()
login.server_hostname = cmd.hostname
login.server_port = cmd.port
login.chap_password = cmd.chapPassword
login.chap_username = cmd.chapUsername
login.target = cmd.target
login.login()
return jsonobject.dumps(LoginIscsiTargetRsp())
@kvmagent.replyerror
def delete_console_firewall_rule(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
vir = VncPortIptableRule()
vir.vm_internal_id = cmd.vmInternalId
vir.host_ip = cmd.hostManagementIp
vir.delete()
return jsonobject.dumps(kvmagent.AgentResponse())
@kvmagent.replyerror
def create_ceph_secret_key(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
VmPlugin._create_ceph_secret_key(cmd.userKey, cmd.uuid)
return jsonobject.dumps(kvmagent.AgentResponse())
@staticmethod
def _reload_ceph_secret_keys():
for u, k in VmPlugin.secret_keys.items():
VmPlugin._create_ceph_secret_key(k, u)
@staticmethod
def _create_ceph_secret_key(userKey, uuid):
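        """Define a libvirt secret holding a ceph user key.

        If a secret with this uuid already stores the same value, nothing
        is done; if it exists with another value, it is updated in place;
        otherwise a new ephemeral secret is defined from a temporary XML
        file (see the comment below on why the command line is used).
        """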
VmPlugin.secret_keys[uuid] = userKey
sh_cmd = shell.ShellCmd('virsh secret-get-value %s' % uuid)
sh_cmd(False)
if sh_cmd.stdout.strip() == userKey:
return
elif sh_cmd.return_code == 0:
shell.call('virsh secret-set-value %s %s' % (uuid, userKey))
return
# for some reason, ceph doesn't work with the secret created by libvirt
# we have to use the command line here
content = '''
<secret ephemeral='yes' private='no'>
<uuid>%s</uuid>
<usage type='ceph'>
<name>%s</name>
</usage>
</secret>
''' % (uuid, uuid)
spath = linux.write_to_temp_file(content)
try:
o = shell.call("virsh secret-define %s" % spath)
o = o.strip(' \n\t\r')
_, generateuuid, _ = o.split()
shell.call('virsh secret-set-value %s %s' % (generateuuid, userKey))
finally:
os.remove(spath)
@staticmethod
def add_amdgpu_to_blacklist():
r_amd = bash.bash_r("grep -E 'modprobe.blacklist.*amdgpu' /etc/default/grub")
if r_amd != 0:
r_amd, o_amd, e_amd = bash.bash_roe("sed -i 's/radeon/amdgpu,radeon/g' /etc/default/grub")
if r_amd != 0:
return False, "%s %s" % (e_amd, o_amd)
r_amd, o_amd, e_amd = bash.bash_roe("grub2-mkconfig -o /boot/grub2/grub.cfg")
if r_amd != 0:
return False, "%s %s" % (e_amd, o_amd)
r_amd, o_amd, e_amd = bash.bash_roe("grub2-mkconfig -o /etc/grub2-efi.cfg")
if r_amd != 0:
return False, "%s %s" % (e_amd, o_amd)
return True, None
@kvmagent.replyerror
@in_bash
def hot_plug_pci_device(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = HotPlugPciDeviceRsp()
addr = cmd.pciDeviceAddress
domain, bus, slot, function = parse_pci_device_address(addr)
content = '''
<hostdev mode='subsystem' type='pci'>
<driver name='vfio'/>
<source>
<address type='pci' domain='0x%s' bus='0x%s' slot='0x%s' function='0x%s'/>
</source>
</hostdev>''' % (domain, bus, slot, function)
spath = linux.write_to_temp_file(content)
        # do not attach a pci device immediately after detaching one from the same vm
vm = get_vm_by_uuid(cmd.vmUuid)
vm._wait_vm_run_until_seconds(60)
self.timeout_object.wait_until_object_timeout('hot-unplug-pci-device-from-vm-%s' % cmd.vmUuid)
r, o, e = bash.bash_roe("virsh attach-device %s %s" % (cmd.vmUuid, spath))
self.timeout_object.put('hot-plug-pci-device-to-vm-%s' % cmd.vmUuid, timeout=30)
if r != 0:
rsp.success = False
err = self.handle_vfio_irq_conflict_with_addr(cmd.vmUuid, addr)
if err == "":
rsp.error = "failed to attach-device %s to %s: %s, %s" % (addr, cmd.vmUuid, o, e)
else:
rsp.error = "failed to handle_vfio_irq_conflict_with_addr: %s, details: %s %s" % (err, o, e)
logger.debug("attach-device %s to %s: %s, %s" % (spath, cmd.vmUuid, o, e))
return jsonobject.dumps(rsp)
@in_bash
def handle_vfio_irq_conflict_with_addr(self, vmUuid, addr):
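        """Detect INTx IRQ conflicts for a passthrough pci device.

        Checks the vm's qemu log for the vfio INTx error, then scans
        /sys/devices for other pci devices sharing the same IRQ without
        MSI support. Returns '' when no conflict is found, otherwise a
        warning describing how to remove the conflicting devices.
        """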
logger.debug("check irq conflict with %s, %s" % (vmUuid, addr))
cmd = ("tail -n 5 /var/log/libvirt/qemu/%s.log | grep -E 'vfio: Error: Failed to setup INTx fd: Device or resource busy'" %
vmUuid)
r, o, e = bash.bash_roe(cmd)
if r != 0:
return ""
cmd = "lspci -vs %s | grep IRQ | awk '{print $5}' | grep -E -o '[[:digit:]]+'" % addr
r, o, e = bash.bash_roe(cmd)
if o == "":
return "can not get irq"
hostname = bash.bash_o("hostname -f")
cmd = "devices=`find /sys/devices/ -iname 'irq' | grep pci | xargs grep %s | grep -v '%s' | awk -F '/' '{ print \"/\"$2\"/\"$3\"/\"$4\"/\"$5 }' | sort | uniq`;" % (o.strip(), addr) + \
" for dev in $devices; do wc -l $dev/msi_bus; done | grep -E '^.*0 /sys' | awk -F '/' '{ print \"/\"$2\"/\"$3\"/\"$4\"/\"$5 }'"
r, o, e = bash.bash_roe(cmd)
if o == "":
return "there are irq conflict, but zstack can not get irq conflict device, you need fix it manually"
ret = ""
names = ""
for dev in o.splitlines():
if dev.strip() != "":
ret += "echo 1 > %s/remove; " % dev
cmd = "lspci -s %s" % dev.split('/')[-1]
r, o, e = bash.bash_roe(cmd)
names += o.strip()
return "WARN: found irq conflict for pci device addr %s, please execute '%s', and then try to passthrough again. Please noted, the above command will remove the conflicted devices(%s) from system, ONLY reboot can bring the device back to service." % \
(addr, ret, names)
@in_bash
def handle_vfio_irq_conflict(self, vmUuid):
cmd = ("tail -n 5 /var/log/libvirt/qemu/%s.log | grep -E 'qemu.*vfio: Error: Failed to setup INTx fd: Device or resource busy' | awk -F'[=,]' '{ print $3 }'" %
vmUuid)
r, o, e = bash.bash_roe(cmd)
if r != 0:
return ""
return self.handle_vfio_irq_conflict_with_addr(vmUuid, o.strip())
@kvmagent.replyerror
@in_bash
def hot_unplug_pci_device(self, req):
@linux.retry(3, 3)
def find_pci_device(vm_uuid, pci_addr):
domain, bus, slot, function = parse_pci_device_address(pci_addr)
cmd = """virsh dumpxml %s | grep -A3 -E '<hostdev.*pci' | grep "<address domain='0x%s' bus='0x%s' slot='0x%s' function='0x%s'/>" """ % \
(vm_uuid, domain, bus, slot, function)
r, o, e = bash.bash_roe(cmd)
return o != ""
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = HotUnplugPciDeviceRsp()
addr = cmd.pciDeviceAddress
if not find_pci_device(cmd.vmUuid, addr):
logger.debug("pci device %s not found" % addr)
return jsonobject.dumps(rsp)
domain, bus, slot, function = parse_pci_device_address(addr)
content = '''
<hostdev mode='subsystem' type='pci'>
<driver name='vfio'/>
<source>
<address type='pci' domain='0x%s' bus='0x%s' slot='0x%s' function='0x%s'/>
</source>
</hostdev>''' % (domain, bus, slot, function)
spath = linux.write_to_temp_file(content)
# no need to detach pci device if vm is shutdown
vm = get_vm_by_uuid_no_retry(cmd.vmUuid, exception_if_not_existing=False)
if not vm or vm.state == Vm.VM_STATE_SHUTDOWN:
logger.debug("vm[uuid:%s] is shutdown, no need to detach pci device" % cmd.vmUuid)
return jsonobject.dumps(rsp)
# do not detach pci device immediately after starting vm instance
try:
vm._wait_vm_run_until_seconds(60)
except Exception:
logger.debug("cannot find pid of vm[uuid:%s, state:%s], no need to detach pci device" % (cmd.vmUuid, vm.state))
return jsonobject.dumps(rsp)
        # do not detach a pci device immediately after attaching one to the same vm
self.timeout_object.wait_until_object_timeout('hot-plug-pci-device-to-vm-%s' % cmd.vmUuid)
self.timeout_object.put('hot-unplug-pci-device-from-vm-%s' % cmd.vmUuid, timeout=10)
retry_num = 4
retry_interval = 5
logger.debug("try to virsh detach xml for %d times: %s" % (retry_num, content))
for i in range(1, retry_num + 1):
r, o, e = bash.bash_roe("virsh detach-device %s %s" % (cmd.vmUuid, spath))
succ = linux.wait_callback_success(lambda args: not find_pci_device(args[0], args[1]), [cmd.vmUuid, addr], timeout=retry_interval)
if succ:
break
if i < retry_num:
continue
if r != 0:
rsp.success = False
rsp.error = "failed to detach-device %s from %s: %s, %s" % (addr, cmd.vmUuid, o, e)
return jsonobject.dumps(rsp)
if not succ:
rsp.success = False
rsp.error = "pci device %s still exists on vm %s after %ds" % (addr, cmd.vmUuid, retry_num * retry_interval)
return jsonobject.dumps(rsp)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def attach_pci_device_to_host(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AttachPciDeviceToHostRsp()
addr = cmd.pciDeviceAddress
r, o, e = bash.bash_roe("virsh nodedev-reattach pci_%s" % addr.replace(':', '_').replace('.', '_'))
logger.debug("nodedev-reattach %s: %s, %s" % (addr, o, e))
if r != 0:
rsp.success = False
rsp.error = "failed to nodedev-reattach %s: %s, %s" % (addr, o, e)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def detach_pci_device_from_host(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = DetachPciDeviceFromHostRsp()
addr = cmd.pciDeviceAddress
r, o, e = bash.bash_roe("virsh nodedev-detach pci_%s" % addr.replace(':', '_').replace('.', '_'))
logger.debug("nodedev-detach %s: %s, %s" % (addr, o, e))
if r != 0:
rsp.success = False
rsp.error = "failed to nodedev-detach %s: %s, %s" % (addr, o, e)
return jsonobject.dumps(rsp)
def _get_next_usb_port(self, dom, bus):
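        """Return the lowest free port number on the given usb bus.

        Ports already taken by <hostdev> and <redirdev> devices are
        collected from the live domain XML; a few low ports are treated
        as reserved per architecture (see the comment below).
        """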
domain_xml = dom.XMLDesc(0)
domain_xmlobject = xmlobject.loads(domain_xml)
# if arm or mips uhci, port 0, 1, 2 are hard-coded reserved
# else uhci, port 0, 1 are hard-coded reserved
if bus == 0 and HOST_ARCH in ['aarch64', 'mips64el']:
usb_ports = [0, 1, 2]
elif bus == 0:
usb_ports = [0, 1]
else:
usb_ports = [0]
for hostdev in domain_xmlobject.devices.get_child_node_as_list('hostdev'):
if hostdev.type_ == 'usb':
for address in hostdev.get_child_node_as_list('address'):
if address.type_ == 'usb' and address.bus_ == str(bus):
usb_ports.append(int(address.port_))
for redirdev in domain_xmlobject.devices.get_child_node_as_list('redirdev'):
if redirdev.type_ == 'tcp':
for address in redirdev.get_child_node_as_list('address'):
if address.type_ == 'usb' and address.bus_ == str(bus):
usb_ports.append(int(address.port_))
# get the first unused port number
for i in range(len(usb_ports) + 1):
if i not in usb_ports:
return i
@kvmagent.replyerror
def kvm_attach_usb_device(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = KvmAttachUsbDeviceRsp()
bus = int(cmd.usbVersion[0]) - 1
r, ex = self._attach_usb_by_libvirt(cmd, bus)
if not r:
rsp.success = False
rsp.error = ex
return jsonobject.dumps(rsp)
@linux.retry(times=5, sleep_time=2)
def _detach_usb_by_libvirt(self, cmd):
vm = get_vm_by_uuid(cmd.vmUuid)
root = None
if cmd.attachType == "PassThrough":
root = etree.Element('hostdev', {'mode': 'subsystem', 'type': 'usb', 'managed': 'yes'})
d = e(root, 'source')
e(d, 'vendor', None, {'id': '0x%s' % cmd.idVendor})
e(d, 'product', None, {'id': '0x%s' % cmd.idProduct})
e(d, 'address', None, {'bus': str(cmd.busNum).lstrip('0'), 'device': str(cmd.devNum).lstrip('0')})
if cmd.attachType == "Redirect":
root = etree.Element('redirdev', {'bus': 'usb', 'type': 'tcp'})
e(root, 'source', None, {'mode': 'connect', 'host': cmd.ip, 'service': str(cmd.port)})
xml = etree.tostring(root)
logger.info(xml)
try:
vm.domain.detachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
except libvirt.libvirtError as ex:
logger.warn('detach usb device to domain[%s] failed: %s' % (cmd.vmUuid, str(ex)))
if "redirdev was not found" in str(ex):
logger.debug(
"cannot find matching redirdev from vm %s domainxml, maybe usb has been detached" % cmd.vmUuid)
return True
raise RetryException("failed to detach usb device from %s: %s" % (cmd.vmUuid, str(ex)))
logger.debug("detached usb device from %s successfully" % cmd.vmUuid)
def _attach_usb_by_libvirt(self, cmd, bus):
vm = get_vm_by_uuid(cmd.vmUuid)
root = None
if cmd.attachType == "PassThrough":
root = etree.Element('hostdev', {'mode': 'subsystem', 'type': 'usb', 'managed': 'yes'})
d = e(root, 'source')
e(d, 'vendor', None, {'id': '0x%s' % cmd.idVendor})
e(d, 'product', None, {'id': '0x%s' % cmd.idProduct})
e(d, 'address', None, {'bus': str(cmd.busNum).lstrip('0'), 'device': str(cmd.devNum).lstrip('0')})
e(root, 'address', None, {'type': 'usb', 'bus': str(bus), 'port': str(self._get_next_usb_port(vm.domain, bus))})
if cmd.attachType == "Redirect":
root = etree.Element('redirdev', {'bus': 'usb', 'type': 'tcp'})
e(root, 'source', None, {'mode': 'connect', 'host': cmd.ip, 'service': str(cmd.port)})
e(root, 'address', None, {'type': 'usb', 'bus': str(bus), 'port': str(self._get_next_usb_port(vm.domain, bus))})
xml = etree.tostring(root)
logger.info(xml)
try:
vm.domain.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
except libvirt.libvirtError as ex:
logger.warn('attach usb device to domain[%s] failed: %s' % (cmd.vmUuid, str(ex)))
return False, str(ex)
return True, None
# deprecated
def _attach_usb(self, cmd, bus):
vm = get_vm_by_uuid(cmd.vmUuid)
content = ''
if cmd.attachType == "PassThrough":
content = '''
<hostdev mode='subsystem' type='usb' managed='yes'>
<source>
<vendor id='0x%s'/>
<product id='0x%s'/>
<address bus='%s' device='%s'/>
</source>
<address type='usb' bus='%s' port='%s' />
</hostdev>''' % (cmd.idVendor, cmd.idProduct, int(cmd.busNum), int(cmd.devNum), bus, self._get_next_usb_port(vm.domain, bus))
if cmd.attachType == "Redirect":
content = '''
<redirdev bus='usb' type='tcp'>
<source mode='connect' host='%s' service='%s'/>
<address type='usb' bus='%s' port='%s'/>
</redirdev>''' % (cmd.ip, int(cmd.port), bus, self._get_next_usb_port(vm.domain, bus))
spath = linux.write_to_temp_file(content)
r, o, e = bash.bash_roe("virsh attach-device %s %s" % (cmd.vmUuid, spath))
os.remove(spath)
logger.debug("attached %s to %s, %s, %s" % (
spath, cmd.vmUuid, o, e))
return r, o, e
@kvmagent.replyerror
def kvm_detach_usb_device(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = KvmDetachUsbDeviceRsp()
try:
self._detach_usb_by_libvirt(cmd)
except Exception as e:
rsp.success = False
rsp.error = str(e)
return jsonobject.dumps(rsp)
# deprecated
@linux.retry(times=5, sleep_time=2)
def _detach_usb(self, cmd):
content = ''
if cmd.attachType == "PassThrough":
content = '''
<hostdev mode='subsystem' type='usb' managed='yes'>
<source>
<vendor id='0x%s'/>
<product id='0x%s'/>
<address bus='%s' device='%s'/>
</source>
</hostdev>''' % (cmd.idVendor, cmd.idProduct, int(cmd.busNum), int(cmd.devNum))
if cmd.attachType == "Redirect":
content = '''
<redirdev bus='usb' type='tcp'>
<source mode='connect' host='%s' service='%s'/>
</redirdev>''' % (cmd.ip, int(cmd.port))
spath = linux.write_to_temp_file(content)
r, o, e = bash.bash_roe("virsh detach-device %s %s" % (cmd.vmUuid, spath))
os.remove(spath)
if r:
if "redirdev was not found" in e:
logger.debug("cannot find matching redirdev from vm %s domainxml, maybe usb has been detached" % cmd.vmUuid)
return
raise RetryException("failed to detach usb device from %s: %s, %s" % (cmd.vmUuid, o, e))
else:
logger.debug("detached usb device %s from %s" % (spath, cmd.vmUuid))
@kvmagent.replyerror
def reload_redirect_usb(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = ReloadRedirectUsbRsp()
self._detach_usb_by_libvirt(cmd)
bus = int(cmd.usbVersion[0]) - 1
r, ex = self._attach_usb_by_libvirt(cmd, bus)
if not r:
rsp.success = False
rsp.error = ex
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def vm_priority(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = UpdateVmPriorityRsp()
for pcs in cmd.priorityConfigStructs:
pid = linux.find_vm_pid_by_uuid(pcs.vmUuid)
linux.set_vm_priority(pid, pcs)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def kvm_resize_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = KvmResizeVolumeRsp()
vm = get_vm_by_uuid(cmd.vmUuid, exception_if_not_existing=False)
vm.resize_volume(cmd.volume, cmd.size)
touchQmpSocketWhenExists(cmd.vmUuid)
return jsonobject.dumps(rsp)
def _create_xml_for_guesttools_temp_disk(self, vm_uuid):
temp_disk = "/var/lib/zstack/guesttools/temp_disk_%s.qcow2" % vm_uuid
content = """
<disk type='file' device='disk'>
<driver type='qcow2' cache='writeback'/>
<source file='%s'/>
<target dev='vdz' bus='virtio'/>
</disk>
""" % temp_disk
return linux.write_to_temp_file(content)
@kvmagent.replyerror
@in_bash
def attach_guest_tools_iso_to_vm(self, req):
rsp = AttachGuestToolsIsoToVmRsp()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
vm_uuid = cmd.vmInstanceUuid
if not os.path.exists(GUEST_TOOLS_ISO_PATH):
rsp.success = False
rsp.error = "%s not exists" % GUEST_TOOLS_ISO_PATH
return jsonobject.dumps(rsp)
r, _, _ = bash.bash_roe("virsh dumpxml %s | grep \"dev='vdz' bus='virtio'\"" % vm_uuid)
if cmd.needTempDisk and r != 0:
temp_disk = "/var/lib/zstack/guesttools/temp_disk_%s.qcow2" % vm_uuid
if not os.path.exists(temp_disk):
linux.qcow2_create(temp_disk, 1)
spath = self._create_xml_for_guesttools_temp_disk(vm_uuid)
r, o, e = bash.bash_roe("virsh attach-device %s %s" % (vm_uuid, spath))
# temp_disk will be truly deleted after it's closed by qemu-kvm
linux.rm_file_force(temp_disk)
if r != 0:
rsp.success = False
rsp.error = "%s, %s" % (o, e)
return jsonobject.dumps(rsp)
else:
logger.debug("attached temp disk %s to %s, %s, %s" % (spath, vm_uuid, o, e))
# attach guest tools iso to [hs]dc, whose device id is 0
vm = get_vm_by_uuid(vm_uuid, exception_if_not_existing=False)
iso = IsoTo()
iso.deviceId = 0
iso.path = GUEST_TOOLS_ISO_PATH
# in case same iso already attached
detach_cmd = DetachIsoCmd()
detach_cmd.vmUuid = vm_uuid
detach_cmd.deviceId = iso.deviceId
vm.detach_iso(detach_cmd)
attach_cmd = AttachIsoCmd()
attach_cmd.iso = iso
attach_cmd.vmUuid = vm_uuid
vm.attach_iso(attach_cmd)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def detach_guest_tools_iso_from_vm(self, req):
rsp = DetachGuestToolsIsoFromVmRsp()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
vm_uuid = cmd.vmInstanceUuid
# detach temp_disk from vm
spath = self._create_xml_for_guesttools_temp_disk(vm_uuid)
bash.bash_roe("virsh detach-device %s %s" % (vm_uuid, spath))
# detach guesttools iso from vm
r, _, _ = bash.bash_roe("virsh dumpxml %s | grep %s" % (vm_uuid, GUEST_TOOLS_ISO_PATH))
if r == 0:
vm = get_vm_by_uuid(vm_uuid, exception_if_not_existing=False)
detach_cmd = DetachIsoCmd()
detach_cmd.vmUuid = vm_uuid
detach_cmd.deviceId = 0
vm.detach_iso(detach_cmd)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def get_vm_guest_tools_info(self, req):
rsp = GetVmGuestToolsInfoRsp()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
# get guest tools info by reading VERSION file inside vm
vm_uuid = cmd.vmInstanceUuid
r, o, e = bash.bash_roe('virsh qemu-agent-command %s --cmd \'{"execute":"guest-file-open", \
"arguments":{"path":"C:\\\Program Files\\\Common Files\\\GuestTools\\\VERSION", "mode":"r"}}\'' % vm_uuid)
if r != 0:
_r, _o, _e = bash.bash_roe("virsh qemu-agent-command %s --cmd '{\"execute\":\"guest-tools-info\"}'" % vm_uuid)
if _r == 0:
info = simplejson.loads(_o)['return']
for k in info.keys():
setattr(rsp, k, info[k])
return jsonobject.dumps(rsp)
else:
rsp.success = False
rsp.error = "%s, %s" % (o, e)
return jsonobject.dumps(rsp)
fd = simplejson.loads(o)['return']
def _close_version_file():
bash.bash_roe('virsh qemu-agent-command %s --cmd \'{"execute":"guest-file-close", "arguments":{"handle":%s}}\'' % (vm_uuid, fd))
r, o, e = bash.bash_roe('virsh qemu-agent-command %s --cmd \'{"execute":"guest-file-read", "arguments":{"handle":%s}}\'' % (vm_uuid, fd))
if r != 0:
_close_version_file()
rsp.success = False
rsp.error = "%s, %s" % (o, e)
return jsonobject.dumps(rsp)
version = base64.b64decode(simplejson.loads(o)['return']['buf-b64']).strip()
rsp.version = version
rsp.status = 'Running'
_close_version_file()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def fail_colo_pvm(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
r, _, e = linux.sshpass_run(cmd.targetHostIp, cmd.targetHostPassword, "pkill -f 'qemu-system-x86_64 -name guest=%s'" % cmd.vmInstanceUuid, "root", cmd.targetHostPort)
if r != 0:
rsp.success = False
rsp.error = 'failed to kill vm %s on host %s, cause: %s' % (cmd.vmInstanceUuid, cmd.targetHostIp, e)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def rollback_quorum_config(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
vm = get_vm_by_uuid_no_retry(cmd.vmInstanceUuid, False)
if not vm:
            raise Exception('vm[uuid:%s] does not exist' % cmd.vmInstanceUuid)
count = 0
for alias_name in vm._get_all_volume_alias_names(cmd.volumes):
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "x-blockdev-change",'
' "arguments": {"parent": "%s", "child": "children.1"}}' % alias_name)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "human-monitor-command",'
' "arguments":{"command-line": "drive_del replication%s"}}' % count)
count += 1
for i in xrange(0, cmd.nicNumber):
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"fm-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"primary-out-redirect-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"primary-in-redirect-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"comp-%s"}}' % i)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def wait_secondary_vm_ready(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
def wait_for_colo_state_change(_):
vm = get_vm_by_uuid_no_retry(cmd.vmInstanceUuid, False)
if not vm:
                raise Exception('vm[uuid:%s] does not exist' % cmd.vmInstanceUuid)
r, o, err = execute_qmp_command(cmd.vmInstanceUuid, '{"execute":"query-colo-status"}')
if err:
raise Exception('Failed to check vm[uuid:%s] colo status by query-colo-status' % cmd.vmInstanceUuid)
colo_status = json.loads(o)['return']
mode = colo_status['mode']
return mode == 'secondary'
if not linux.wait_callback_success(wait_for_colo_state_change, None, interval=3, timeout=cmd.coloCheckTimeout):
            raise Exception('secondary vm[uuid:%s] did not become ready within %s seconds' % (cmd.vmInstanceUuid, cmd.coloCheckTimeout))
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def check_colo_vm_state(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
states = get_all_vm_states()
rsp = CheckColoVmStateRsp()
state = states.get(cmd.vmInstanceUuid)
        if state != Vm.VM_STATE_RUNNING and state != Vm.VM_STATE_PAUSED:
rsp.state = state
return jsonobject.dumps(rsp)
r, o, err = execute_qmp_command(cmd.vmInstanceUuid, '{"execute":"query-colo-status"}')
if err:
rsp.success = False
rsp.error = "Failed to check vm colo status"
return jsonobject.dumps(rsp)
colo_status = json.loads(o)['return']
rsp.mode = colo_status['mode']
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def register_primary_vm_heartbeat(self, req):
rsp = kvmagent.AgentResponse()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
try:
s.connect((cmd.targetHostIp, cmd.heartbeatPort))
logger.debug("Successfully test heartbeat to address[%s:%s]" % (cmd.targetHostIp, cmd.heartbeatPort))
except socket.error as ex:
logger.debug("Failed to detect heartbeat connection return error")
rsp.success = False
rsp.error = "Failed connect to heartbeat address[%s:%s], because %s" % (cmd.targetHostIp, cmd.heartbeatPort, ex)
finally:
s.close()
if not rsp.success:
return jsonobject.dumps(rsp)
if self.vm_heartbeat.get(cmd.vmInstanceUuid) is not None and self.vm_heartbeat.get(
cmd.vmInstanceUuid).is_alive():
logger.debug("vm heartbeat thread exists, skip it")
return jsonobject.dumps(rsp)
self.vm_heartbeat[cmd.vmInstanceUuid] = thread.ThreadFacade.run_in_thread(self.start_vm_heart_beat, (cmd,))
if self.vm_heartbeat.get(cmd.vmInstanceUuid).is_alive():
logger.debug("successfully start vm heartbeat")
else:
logger.debug("Failed to start vm heartbeat")
rsp.success = False
rsp.error = "Failed to start vm heartbeat address[%s:%s]" % (cmd.targetHostIp, cmd.heartbeatPort)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def start_colo_sync(self, req):
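        """Turn a vm into the primary side of a COLO pair.

        Rough sequence, derived from the QMP commands issued below:
        optionally drive-mirror a full resync to the secondary's NBD
        server, attach an nbd 'replication' child to each quorum disk,
        install filter-mirror/filter-redirector/colo-compare network
        objects per nic, then start the COLO migration and poll it until
        its status becomes 'colo'. On failure the added objects and
        replication children are rolled back.
        """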
rsp = kvmagent.AgentResponse()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "qmp_capabilities"}')
vm = get_vm_by_uuid_no_retry(cmd.vmInstanceUuid, False)
if not vm:
            raise Exception('vm[uuid:%s] does not exist' % cmd.vmInstanceUuid)
count = 0
replication_list = []
def colo_qemu_replication_cleanup():
for replication in replication_list:
if replication.alias_name:
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "x-blockdev-change",'
' "arguments": {"parent": "%s", "child": "children.1"}}' % replication.alias_name)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "human-monitor-command",'
' "arguments":{"command-line": "drive_del replication%s"}}' % replication.replication_id)
@linux.retry(times=3, sleep_time=0.5)
def add_nbd_client_to_quorum(alias_name, count):
r, stdout, err = execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "x-blockdev-change","arguments":'
'{"parent": "%s","node": "replication%s" } }' % (alias_name, count))
if err:
return False
elif 'does not support adding a child' in stdout:
raise RetryException("failed to add child to %s" % alias_name)
else:
return True
for alias_name in vm._get_all_volume_alias_names(cmd.volumes):
if cmd.fullSync:
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "drive-mirror", "arguments":{ "device": "%s",'
' "job-id": "zs-ft-resync", "target": "nbd://%s:%s/parent%s",'
' "mode": "existing", "format": "nbd", "sync": "full"} }'
% (alias_name, cmd.secondaryVmHostIp, cmd.nbdServerPort, count))
while True:
time.sleep(3)
r, o, err = execute_qmp_command(cmd.vmInstanceUuid, '{"execute":"query-block-jobs"}')
if err:
rsp.success = False
rsp.error = "Failed to get zs-ft-resync job, report error"
return jsonobject.dumps(rsp)
block_jobs = json.loads(o)['return']
job = next((job for job in block_jobs if job['device'] == 'zs-ft-resync'), None)
if not job:
logger.debug("job finished, start colo sync")
break
if job['status'] == 'ready':
break
logger.debug("current resync %s/%s, percentage %s" % (
job['len'], job['offset'], 100 * (float(job['offset'] / float(job['len'])))))
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "stop"}')
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "block-job-cancel", "arguments":{ "device": "zs-ft-resync"}}')
while True:
time.sleep(1)
r, o, err = execute_qmp_command(cmd.vmInstanceUuid, '{"execute":"query-block-jobs"}')
if err:
rsp.success = False
rsp.error = "Failed to query block jobs, report error"
return jsonobject.dumps(rsp)
block_jobs = json.loads(o)['return']
job = next((job for job in block_jobs if job['device'] == 'zs-ft-resync'), None)
if job:
continue
break
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "human-monitor-command","arguments":'
' {"command-line":"drive_add -n buddy'
' driver=replication,mode=primary,file.driver=nbd,file.host=%s,'
'file.port=%s,file.export=parent%s,node-name=replication%s"}}'
% (cmd.secondaryVmHostIp, cmd.nbdServerPort, count, count))
            succeeded = False
            try:
                succeeded = add_nbd_client_to_quorum(alias_name, count)
            except Exception as e:
                logger.debug("ignore exception raised by retry")
            if not succeeded:
replication_list.append(ColoReplicationConfig(None, count))
colo_qemu_replication_cleanup()
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "cont"}')
rsp.success = False
rsp.error = "Failed to setup quorum replication node, report error"
return jsonobject.dumps(rsp)
replication_list.append(ColoReplicationConfig(alias_name, count))
            count += 1
domain_xml = vm.domain.XMLDesc(0)
is_origin_secondary = 'filter-rewriter' in domain_xml
for count in xrange(0, cmd.nicNumber):
if not is_origin_secondary:
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "object-add", "arguments":{ "qom-type": "filter-mirror", "id": "fm-%s",'
' "props": { "netdev": "hostnet%s", "queue": "tx", "outdev": "zs-mirror-%s" } } }'
% (count, count, count))
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "object-add", "arguments":{ "qom-type": "filter-redirector",'
' "id": "primary-out-redirect-%s", "props": { "netdev": "hostnet%s", "queue": "rx",'
' "indev": "primary-out-s-%s"}}}' % (count, count, count))
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "object-add", "arguments":{ "qom-type": "filter-redirector", "id":'
' "primary-in-redirect-%s", "props": { "netdev": "hostnet%s", "queue": "rx",'
' "outdev": "primary-in-s-%s"}}}' % (count, count, count))
else:
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "object-add", "arguments":{ "qom-type": "filter-mirror",'
' "id": "fm-%s", "props": { "insert": "before", "position": "id=rew-%s", '
' "netdev": "hostnet%s", "queue": "tx", "outdev": "zs-mirror-%s" } } }'
% (count, count, count, count))
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "object-add", "arguments":{ "qom-type": "filter-redirector",'
' "id": "primary-out-redirect-%s", "props":'
' { "insert": "before", "position": "id=rew-%s",'
' "netdev": "hostnet%s", "queue": "rx",'
' "indev": "primary-out-s-%s"}}}' % (count, count, count, count))
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "object-add", "arguments":{ "qom-type": "filter-redirector", "id":'
' "primary-in-redirect-%s", "props": { "insert": "before", "position": "id=rew-%s",'
' "netdev": "hostnet%s", "queue": "rx",'
' "outdev": "primary-in-s-%s"}}}' % (count, count, count, count))
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "object-add", "arguments":{ "qom-type": "colo-compare", "id": "comp-%s",'
' "props": { "primary_in": "primary-in-c-%s", "secondary_in": "secondary-in-s-%s",'
' "outdev":"primary-out-c-%s", "iothread": "iothread%s" } } }'
% (count, count, count, count, int(count) + 1))
count += 1
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "migrate-set-capabilities","arguments":'
'{"capabilities":[ {"capability": "x-colo", "state":true}]}}')
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "migrate-set-parameters", "arguments":'
'{ "max-bandwidth": 3355443200 }}')
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "migrate", "arguments": {"uri": "tcp:%s:%s"}}'
% (cmd.secondaryVmHostIp, cmd.blockReplicationPort))
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "migrate-set-parameters",'
' "arguments": {"x-checkpoint-delay": %s}}'
% cmd.checkpointDelay)
def colo_qemu_object_cleanup():
for i in xrange(cmd.nicNumber):
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"fm-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"primary-out-redirect-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"primary-in-redirect-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"comp-%s"}}' % i)
# wait primary vm migrate job finished
failure = 0
while True:
r, o, err = execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "query-migrate"}')
if err:
rsp.success = False
rsp.error = "Failed to query migrate info, because %s" % err
colo_qemu_object_cleanup()
break
migrate_info = json.loads(o)['return']
if migrate_info['status'] == 'colo':
logger.debug("migrate finished")
break
elif migrate_info['status'] == 'active':
ram_info = migrate_info['ram']
logger.debug("current migrate %s/%s, percentage %s"
% (ram_info['total'], ram_info['remaining'], 100 * (float(ram_info['remaining'] / float(ram_info['total'])))))
elif migrate_info['status'] == 'failed':
rsp.success = False
rsp.error = "could not finish colo migration."
try:
vm = get_vm_by_uuid_no_retry(cmd.vmInstanceUuid, False)
if vm:
vm.resume()
                        logger.debug('successfully resumed vm[uuid:%s]' % cmd.vmInstanceUuid)
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
break
            else:
                # these statuses are not handled, but the vm should not get stuck in:
                # MIGRATION_STATUS_POSTCOPY_ACTIVE
                # MIGRATION_STATUS_POSTCOPY_PAUSED
                # MIGRATION_STATUS_POSTCOPY_RECOVER
                # MIGRATION_STATUS_SETUP
                # MIGRATION_STATUS_PRE_SWITCHOVER
                # MIGRATION_STATUS_DEVICE
if failure < 2:
failure += 1
else:
rsp.success = False
rsp.error = "unknown migrate status: %s" % migrate_info['status']
# cancel migrate if vm stuck in unexpected status
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "migrate_cancel"}')
break
time.sleep(2)
if not rsp.success:
colo_qemu_object_cleanup()
colo_qemu_replication_cleanup()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def config_secondary_vm(self, req):
rsp = kvmagent.AgentResponse()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "qmp_capabilities"}')
execute_qmp_command(cmd.vmInstanceUuid, '{"execute":"nbd-server-start", "arguments":{"addr":{"type":"inet",'
' "data":{"host":"%s", "port":"%s"}}}}'
% (cmd.primaryVmHostIp, cmd.nbdServerPort))
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "nbd-server-add",'
' "arguments": {"device": "parent0", "writable": true }}')
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def config_primary_vm(self, req):
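        """Create the chardevs used by COLO network filtering on the
        primary vm: one mirror socket plus primary-in/secondary-in/
        primary-out server and client pairs per nic. Nics that already
        have a 'zs-mirror*' chardev are skipped, and every port is
        checked for availability first.
        """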
        rsp = kvmagent.AgentResponse()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "qmp_capabilities"}')
r, o, err = execute_qmp_command(cmd.vmInstanceUuid, '{"execute":"query-chardev"}')
if err:
rsp.success = False
rsp.error = "Failed to check qemu config, report error"
return jsonobject.dumps(rsp)
vm = get_vm_by_uuid(cmd.vmInstanceUuid)
domain_xml = vm.domain.XMLDesc(0)
is_origin_secondary = 'filter-rewriter' in domain_xml
char_devices = json.loads(o)['return']
mirror_device_nums = [int(dev['label'][-1]) for dev in char_devices if dev['label'].startswith('zs-mirror')]
logger.debug("get mirror char device of vm[uuid:%s] devices: %s" % (cmd.vmInstanceUuid, mirror_device_nums))
if len(mirror_device_nums) == len(cmd.configs):
logger.debug("config and devices matched, just return success")
return jsonobject.dumps(rsp)
elif len(mirror_device_nums) > len(cmd.configs):
logger.debug("vm over config, please check what happened")
return jsonobject.dumps(rsp)
count = len(mirror_device_nums)
for config in cmd.configs[len(mirror_device_nums):]:
if not linux.is_port_available(config.mirrorPort):
raise Exception("failed to config primary vm, because mirrorPort port %d is occupied" % config.mirrorPort)
if not linux.is_port_available(config.primaryInPort):
raise Exception("failed to config primary vm, because primaryInPort port %d is occupied" % config.primaryInPort)
if not linux.is_port_available(config.secondaryInPort):
raise Exception("failed to config primary vm, because secondaryInPort port %d is occupied" % config.secondaryInPort)
if not linux.is_port_available(config.primaryOutPort):
raise Exception("failed to config primary vm, because mirrorPort port %d is occupied" % config.primaryOutPort)
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "chardev-add", "arguments":{ "id": "zs-mirror-%s", "backend":'
' {"type": "socket", "data": {"addr": { "type": "inet", "data":'
' { "host": "%s", "port": "%s" } }, "server": true}}}}'
% (count, cmd.hostIp, config.mirrorPort))
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "chardev-add", "arguments":{ "id": "primary-in-s-%s",'
' "backend": {"type": "socket", "data": {"addr": { "type":'
' "inet", "data": { "host": "%s", "port": "%s" } },'
' "server": true } } } }' % (count, cmd.hostIp, config.primaryInPort))
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "chardev-add", "arguments":{'
' "id": "secondary-in-s-%s","backend": {"type":'
' "socket", "data": {"addr": {"type":'
' "inet", "data": { "host": "%s", "port": "%s" } },'
' "server": true } } } }' % (count, cmd.hostIp, config.secondaryInPort))
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "chardev-add", "arguments":{ "id": "primary-in-c-%s",'
' "backend": {"type": "socket", "data": {"addr": { "type":'
' "inet", "data": { "host": "%s", "port": "%s" } },'
' "server": false } } } }' % (count, cmd.hostIp, config.primaryInPort))
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "chardev-add", "arguments":{ "id": "primary-out-s-%s",'
' "backend": {"type": "socket", "data": {"addr": { "type":'
' "inet", "data": { "host": "%s", "port": "%s" } },'
' "server": true } } } }' % (count, cmd.hostIp, config.primaryOutPort))
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "chardev-add", "arguments":{ "id": "primary-out-c-%s",'
' "backend": {"type": "socket", "data": {"addr": { "type":'
' "inet", "data": { "host": "%s", "port": "%s" } },'
' "server": false } } } }' % (count, cmd.hostIp, config.primaryOutPort))
count += 1
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def get_vm_first_boot_device(self, req):
rsp = GetVmFirstBootDeviceRsp()
cmd = jsonobject.loads(req[http.REQUEST_BODY])
vm_uuid = cmd.uuid
vm = get_vm_by_uuid_no_retry(vm_uuid, False)
boot_dev = find_domain_first_boot_device(vm.domain.XMLDesc(0))
rsp.firstBootDevice = boot_dev
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_vm_device_address(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
vm_uuid = cmd.uuid
rsp = GetVmDeviceAddressRsp()
vm = get_vm_by_uuid_no_retry(vm_uuid, False)
for resource_type in cmd.deviceTOs.__dict__.keys():
tos = getattr(cmd.deviceTOs, resource_type)
if resource_type == 'VolumeVO':
addresses = VmPlugin._find_volume_device_address(vm, tos)
else:
addresses = []
rsp.addresses[resource_type] = addresses
return jsonobject.dumps(rsp)
@staticmethod
def _find_volume_device_address(vm, volumes):
# type:(Vm, list[jsonobject.JsonObject]) -> list[VmDeviceAddress]
addresses = []
o = simplejson.loads(shell.call('virsh qemu-monitor-command %s --cmd \'{"execute":"query-pci"}\'' % vm.uuid))
        # query-pci returns one entry per PCI bus; only the first bus (bus 0) is inspected
devices = o['return'][0]['devices']
for vol in volumes:
disk, _ = vm._get_target_disk(vol)
if hasattr(disk, 'wwn'):
addresses.append(VmDeviceAddress(vol.volumeUuid, 'disk', 'wwn', disk.wwn.text_))
continue
elif disk.address.type_ == 'pci':
device = VmPlugin._find_pci_device(devices, disk.alias.name_)
if device:
addresses.append(VmDeviceAddress(vol.volumeUuid, 'disk', 'pci', pci.fmt_pci_address(device)))
continue
addresses.append(VmDeviceAddress(vol.volumeUuid, 'disk', disk.target.bus_, 'unknown'))
return addresses
@staticmethod
def _find_pci_device(devices, qdev_id):
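        # recursively search PCI bridges for the device whose qdev_id matches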
for device in devices:
if device['qdev_id'] == qdev_id:
return device
elif 'pci_bridge' in device:
target = VmPlugin._find_pci_device(device['pci_bridge']['devices'], qdev_id)
if target:
return target
return None
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.KVM_START_VM_PATH, self.start_vm, cmd=StartVmCmd())
http_server.register_async_uri(self.KVM_STOP_VM_PATH, self.stop_vm)
http_server.register_async_uri(self.KVM_PAUSE_VM_PATH, self.pause_vm)
http_server.register_async_uri(self.KVM_RESUME_VM_PATH, self.resume_vm)
http_server.register_async_uri(self.KVM_REBOOT_VM_PATH, self.reboot_vm)
http_server.register_async_uri(self.KVM_DESTROY_VM_PATH, self.destroy_vm)
http_server.register_async_uri(self.KVM_GET_CONSOLE_PORT_PATH, self.get_console_port)
http_server.register_async_uri(self.KVM_ONLINE_CHANGE_CPUMEM_PATH, self.online_change_cpumem)
http_server.register_async_uri(self.KVM_ONLINE_INCREASE_CPU_PATH, self.online_increase_cpu)
http_server.register_async_uri(self.KVM_ONLINE_INCREASE_MEMORY_PATH, self.online_increase_mem)
http_server.register_async_uri(self.KVM_VM_SYNC_PATH, self.vm_sync)
http_server.register_async_uri(self.KVM_ATTACH_VOLUME, self.attach_data_volume)
http_server.register_async_uri(self.KVM_DETACH_VOLUME, self.detach_data_volume)
http_server.register_async_uri(self.KVM_ATTACH_ISO_PATH, self.attach_iso)
http_server.register_async_uri(self.KVM_DETACH_ISO_PATH, self.detach_iso)
http_server.register_async_uri(self.KVM_MIGRATE_VM_PATH, self.migrate_vm)
http_server.register_async_uri(self.KVM_BLOCK_LIVE_MIGRATION_PATH, self.block_migrate_vm)
http_server.register_async_uri(self.KVM_VM_CHECK_VOLUME_PATH, self.check_volume)
http_server.register_async_uri(self.KVM_TAKE_VOLUME_SNAPSHOT_PATH, self.take_volume_snapshot)
http_server.register_async_uri(self.KVM_TAKE_VOLUME_BACKUP_PATH, self.take_volume_backup, cmd=TakeVolumeBackupCommand())
http_server.register_async_uri(self.KVM_TAKE_VOLUMES_SNAPSHOT_PATH, self.take_volumes_snapshots)
http_server.register_async_uri(self.KVM_TAKE_VOLUMES_BACKUP_PATH, self.take_volumes_backups, cmd=TakeVolumesBackupsCommand())
http_server.register_async_uri(self.KVM_CANCEL_VOLUME_BACKUP_JOBS_PATH, self.cancel_backup_jobs)
http_server.register_async_uri(self.KVM_BLOCK_STREAM_VOLUME_PATH, self.block_stream)
http_server.register_async_uri(self.KVM_MERGE_SNAPSHOT_PATH, self.merge_snapshot_to_volume)
http_server.register_async_uri(self.KVM_LOGOUT_ISCSI_TARGET_PATH, self.logout_iscsi_target, cmd=LoginIscsiTargetCmd())
http_server.register_async_uri(self.KVM_LOGIN_ISCSI_TARGET_PATH, self.login_iscsi_target)
http_server.register_async_uri(self.KVM_ATTACH_NIC_PATH, self.attach_nic)
http_server.register_async_uri(self.KVM_DETACH_NIC_PATH, self.detach_nic)
http_server.register_async_uri(self.KVM_UPDATE_NIC_PATH, self.update_nic)
http_server.register_async_uri(self.KVM_CREATE_SECRET, self.create_ceph_secret_key)
http_server.register_async_uri(self.KVM_VM_CHECK_STATE, self.check_vm_state)
http_server.register_async_uri(self.KVM_VM_CHANGE_PASSWORD_PATH, self.change_vm_password, cmd=ChangeVmPasswordCmd())
http_server.register_async_uri(self.KVM_SET_VOLUME_BANDWIDTH, self.set_volume_bandwidth)
http_server.register_async_uri(self.KVM_DELETE_VOLUME_BANDWIDTH, self.delete_volume_bandwidth)
http_server.register_async_uri(self.KVM_GET_VOLUME_BANDWIDTH, self.get_volume_bandwidth)
http_server.register_async_uri(self.KVM_SET_NIC_QOS, self.set_nic_qos)
http_server.register_async_uri(self.KVM_GET_NIC_QOS, self.get_nic_qos)
http_server.register_async_uri(self.KVM_HARDEN_CONSOLE_PATH, self.harden_console)
http_server.register_async_uri(self.KVM_DELETE_CONSOLE_FIREWALL_PATH, self.delete_console_firewall_rule)
http_server.register_async_uri(self.HOT_PLUG_PCI_DEVICE, self.hot_plug_pci_device)
http_server.register_async_uri(self.HOT_UNPLUG_PCI_DEVICE, self.hot_unplug_pci_device)
http_server.register_async_uri(self.ATTACH_PCI_DEVICE_TO_HOST, self.attach_pci_device_to_host)
http_server.register_async_uri(self.DETACH_PCI_DEVICE_FROM_HOST, self.detach_pci_device_from_host)
http_server.register_async_uri(self.KVM_ATTACH_USB_DEVICE_PATH, self.kvm_attach_usb_device)
http_server.register_async_uri(self.KVM_DETACH_USB_DEVICE_PATH, self.kvm_detach_usb_device)
http_server.register_async_uri(self.RELOAD_USB_REDIRECT_PATH, self.reload_redirect_usb)
http_server.register_async_uri(self.CHECK_MOUNT_DOMAIN_PATH, self.check_mount_domain)
http_server.register_async_uri(self.KVM_RESIZE_VOLUME_PATH, self.kvm_resize_volume)
http_server.register_async_uri(self.VM_PRIORITY_PATH, self.vm_priority)
http_server.register_async_uri(self.ATTACH_GUEST_TOOLS_ISO_TO_VM_PATH, self.attach_guest_tools_iso_to_vm)
http_server.register_async_uri(self.DETACH_GUEST_TOOLS_ISO_FROM_VM_PATH, self.detach_guest_tools_iso_from_vm)
http_server.register_async_uri(self.GET_VM_GUEST_TOOLS_INFO_PATH, self.get_vm_guest_tools_info)
http_server.register_async_uri(self.KVM_GET_VM_FIRST_BOOT_DEVICE_PATH, self.get_vm_first_boot_device)
http_server.register_async_uri(self.KVM_CONFIG_PRIMARY_VM_PATH, self.config_primary_vm)
http_server.register_async_uri(self.KVM_CONFIG_SECONDARY_VM_PATH, self.config_secondary_vm)
http_server.register_async_uri(self.KVM_START_COLO_SYNC_PATH, self.start_colo_sync)
http_server.register_async_uri(self.KVM_REGISTER_PRIMARY_VM_HEARTBEAT, self.register_primary_vm_heartbeat)
http_server.register_async_uri(self.CHECK_COLO_VM_STATE_PATH, self.check_colo_vm_state)
http_server.register_async_uri(self.WAIT_COLO_VM_READY_PATH, self.wait_secondary_vm_ready)
http_server.register_async_uri(self.ROLLBACK_QUORUM_CONFIG_PATH, self.rollback_quorum_config)
http_server.register_async_uri(self.FAIL_COLO_PVM_PATH, self.fail_colo_pvm, cmd=FailColoPrimaryVmCmd())
http_server.register_async_uri(self.GET_VM_DEVICE_ADDRESS_PATH, self.get_vm_device_address)
self.clean_old_sshfs_mount_points()
self.register_libvirt_event()
self.register_qemu_log_cleaner()
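        # sharedblock LVs get auto-extended by 2 GiB when a vm pauses with ENOSPC (see _extend_sharedblock)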
self.enable_auto_extend = True
self.auto_extend_size = 1073741824 * 2
# the virtio-channel directory used by VR.
# libvirt won't create this directory when migrating a VR,
# we have to do this otherwise VR migration may fail
linux.mkdir('/var/lib/zstack/kvm/agentSocket/')
@thread.AsyncThread
def wait_end_signal():
while True:
try:
self.queue_singleton.queue.get(True)
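                    # wait for all in-flight http handlers to finish before exiting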
while http.AsyncUirHandler.HANDLER_COUNTER.get() != 0:
time.sleep(0.1)
# the libvirt has been stopped or restarted
# to prevent fd leak caused by broken libvirt connection
# we have to ask mgmt server to reboot the agent
url = self.config.get(kvmagent.SEND_COMMAND_URL)
if not url:
logger.warn('cannot find SEND_COMMAND_URL, unable to ask the mgmt server to reconnect us')
os._exit(1)
host_uuid = self.config.get(kvmagent.HOST_UUID)
if not host_uuid:
logger.warn('cannot find HOST_UUID, unable to ask the mgmt server to reconnect us')
os._exit(1)
logger.warn("libvirt has been rebooted or stopped, ask the mgmt server to reconnt us")
cmd = ReconnectMeCmd()
cmd.hostUuid = host_uuid
cmd.reason = "libvirt rebooted or stopped"
http.json_dump_post(url, cmd, {'commandpath': '/kvm/reconnectme'})
os._exit(1)
except:
content = traceback.format_exc()
logger.warn(content)
finally:
os._exit(1)
wait_end_signal()
@thread.AsyncThread
def monitor_libvirt():
while True:
pid = linux.get_libvirtd_pid()
if not pid or not linux.process_exists(pid):
logger.warn(
"cannot find the libvirt process, assume it's dead, ask the mgmt server to reconnect us")
_stop_world()
time.sleep(20)
monitor_libvirt()
@thread.AsyncThread
def clean_stale_vm_vnc_port_chain():
while True:
logger.debug("do clean up stale vnc port iptable chains")
cleanup_stale_vnc_iptable_chains()
time.sleep(600)
clean_stale_vm_vnc_port_chain()
def start_vm_heart_beat(self, cmd):
def send_failover(vm_instance_uuid, host_uuid, primary_failure):
url = self.config.get(kvmagent.SEND_COMMAND_URL)
if not url:
logger.warn('cannot find SEND_COMMAND_URL')
return
logger.warn("heartbeat of vm %s lost, failover" % vm_instance_uuid)
fcmd = FailOverCmd()
fcmd.vmInstanceUuid = vm_instance_uuid
fcmd.reason = "network failure"
fcmd.hostUuid = host_uuid
fcmd.primaryVmFailure = primary_failure
try:
http.json_dump_post(url, fcmd, {'commandpath': '/kvm/reportfailover'})
except Exception as e:
                logger.debug('failed to report failover: %s' % e)
def test_heart_beat():
logger.debug("vm [uuid:%s] heartbeat finished", cmd.vmInstanceUuid)
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.settimeout(0.5)
try:
s.connect((cmd.targetHostIp, cmd.heartbeatPort))
logger.debug("successfully connect to address[%s:%s]" % (cmd.targetHostIp, cmd.heartbeatPort))
except socket.error as ex:
logger.debug(
"lost heartbeat to %s:%s, because %s" % (cmd.targetHostIp, cmd.heartbeatPort, ex))
if cmd.coloPrimary:
vm = get_vm_by_uuid_no_retry(cmd.vmInstanceUuid, False)
if not vm:
                            raise Exception('vm[uuid:%s] does not exist, failover aborted' % cmd.vmInstanceUuid)
count = 0
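                        # primary side: detach replication children from the quorum and delete colo filter objects before signalling lost heartbeat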
for alias_name in vm._get_all_volume_alias_names(cmd.volumes):
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "x-blockdev-change", "arguments": {"parent":'
' "%s", "child": "children.1"}}' % alias_name)
execute_qmp_command(cmd.vmInstanceUuid,
'{"execute": "human-monitor-command", "arguments":'
'{"command-line": "drive_del replication%s" } }' % count)
count += 1
for i in xrange(cmd.redirectNum):
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"fm-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"primary-out-redirect-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"primary-in-redirect-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"comp-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "x-colo-lost-heartbeat"}')
else:
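                        # secondary side: stop the nbd server, take over via x-colo-lost-heartbeat, then drop the colo filter objects and chardevs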
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "nbd-server-stop"}')
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "x-colo-lost-heartbeat"}')
for i in xrange(cmd.redirectNum):
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{"id":"fr-secondary-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "object-del",'
'"arguments":{ "id": "fr-mirror-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "chardev-remove",'
'"arguments":{"id":"red-secondary-%s"}}' % i)
execute_qmp_command(cmd.vmInstanceUuid, '{"execute": "chardev-remove",'
'"arguments":{"id":"red-mirror-%s"}}' % i)
send_failover(cmd.vmInstanceUuid, cmd.hostUuid, not cmd.coloPrimary)
return True
logger.debug("vm [uuid:%s] heartbeat finished", cmd.vmInstanceUuid)
return False
t = threading.currentThread()
while getattr(t, "do_heart_beat", True):
need_break = test_heart_beat()
if need_break:
break
time.sleep(1)
try:
self.vm_heartbeat.pop(cmd.vmInstanceUuid)
except KeyError:
logger.debug("ignore error occurs when remove %s from heartbeat",
cmd.vmInstanceUuid)
def _vm_lifecycle_event(self, conn, dom, event, detail, opaque):
try:
evstr = LibvirtEventManager.event_to_string(event)
vm_uuid = dom.name()
if evstr not in (LibvirtEventManager.EVENT_STARTED, LibvirtEventManager.EVENT_STOPPED):
logger.debug("ignore event[%s] of the vm[uuid:%s]" % (evstr, vm_uuid))
return
if vm_uuid.startswith("guestfs-"):
logger.debug("[vm_lifecycle]ignore the temp vm[%s] while using guestfish" % vm_uuid)
return
vm_op_judger = self._get_operation(vm_uuid)
if vm_op_judger and evstr in vm_op_judger.ignore_libvirt_events():
# this is an operation originated from ZStack itself
logger.debug(
'ignore event[%s] for the vm[uuid:%s], this operation is from ZStack itself' % (evstr, vm_uuid))
if vm_op_judger.remove_expected_event(evstr) == 0:
self._remove_operation(vm_uuid)
logger.debug(
'events happened of the vm[uuid:%s] meet the expectation, delete the operation judger' % vm_uuid)
return
# this is an operation outside zstack, report it
url = self.config.get(kvmagent.SEND_COMMAND_URL)
if not url:
logger.warn('cannot find SEND_COMMAND_URL, unable to report abnormal operation[vm:%s, op:%s]' % (
vm_uuid, evstr))
return
host_uuid = self.config.get(kvmagent.HOST_UUID)
if not host_uuid:
logger.warn(
'cannot find HOST_UUID, unable to report abnormal operation[vm:%s, op:%s]' % (vm_uuid, evstr))
return
@thread.AsyncThread
def report_to_management_node():
cmd = ReportVmStateCmd()
cmd.vmUuid = vm_uuid
cmd.hostUuid = host_uuid
if evstr == LibvirtEventManager.EVENT_STARTED:
cmd.vmState = Vm.VM_STATE_RUNNING
elif evstr == LibvirtEventManager.EVENT_STOPPED:
cmd.vmState = Vm.VM_STATE_SHUTDOWN
logger.debug(
'detected an abnormal vm operation[uuid:%s, op:%s], report it to %s' % (vm_uuid, evstr, url))
http.json_dump_post(url, cmd, {'commandpath': '/kvm/reportvmstate'})
report_to_management_node()
except:
content = traceback.format_exc()
logger.warn(content)
# WARNING: it contains quite a few hacks to avoid xmlobject#loads()
def _vm_reboot_event(self, conn, dom, opaque):
try:
domain_xml = dom.XMLDesc(0)
vm_uuid = dom.name()
@thread.AsyncThread
def report_to_management_node():
cmd = ReportVmRebootEventCmd()
cmd.vmUuid = vm_uuid
syslog.syslog('report reboot event for vm ' + vm_uuid)
http.json_dump_post(url, cmd, {'commandpath': '/kvm/reportvmreboot'})
# make sure reboot event only report once
op = self._get_operation(vm_uuid)
if op is None or op.op != VmPlugin.VM_OP_REBOOT:
url = self.config.get(kvmagent.SEND_COMMAND_URL)
if not url:
logger.warn(
'cannot find SEND_COMMAND_URL, unable to report shutdown event of vm[uuid:%s]' % vm_uuid)
return
report_to_management_node()
self._record_operation(vm_uuid, VmPlugin.VM_OP_REBOOT)
is_cdrom = self._check_boot_from_cdrom(domain_xml)
if not is_cdrom:
logger.debug(
"the vm[uuid:%s]'s boot device is not cdrom, nothing to do, skip this reboot event" % (vm_uuid))
return
logger.debug(
'the vm[uuid:%s] is set to boot from the cdrom, for the policy[bootFromHardDisk], the reboot will boot from hdd' % vm_uuid)
try:
dom.destroy()
except:
pass
xml = self.update_root_volume_boot_order(domain_xml)
xml = re.sub(r"""\stray\s*=\s*'open'""", """ tray='closed'""", xml)
domain = conn.defineXML(xml)
domain.createWithFlags(0)
except:
content = traceback.format_exc()
logger.warn(content)
# update the boot order of the root volume to 1, rely on the make_volumes() function
def update_root_volume_boot_order(self, domain_xml):
xml = minidom.parseString(domain_xml)
disks = xml.getElementsByTagName('disk')
boots = xml.getElementsByTagName("boot")
for boot in boots:
            boot.parentNode.removeChild(boot)
order = xml.createElement("boot")
order.setAttribute("order", "1")
disks[0].appendChild(order)
xml = xml.toxml()
return xml
def _check_boot_from_cdrom(self, domain_xml):
is_cdrom = False
xml = minidom.parseString(domain_xml)
disks = xml.getElementsByTagName('disk')
for disk in disks:
if disk.getAttribute("device") == "cdrom" and disk.getElementsByTagName("boot").length > 0 and \
disk.getElementsByTagName("boot")[0].getAttribute("order") == "1":
is_cdrom = True
break
if not is_cdrom:
os = xml.getElementsByTagName("os")[0]
if os.getElementsByTagName("boot").length > 0 and os.getElementsByTagName("boot")[0].getAttribute(
"device") == "cdrom":
is_cdrom = True
return is_cdrom
@bash.in_bash
@misc.ignoreerror
def _extend_sharedblock(self, conn, dom, event, detail, opaque):
from shared_block_plugin import MAX_ACTUAL_SIZE_FACTOR
logger.debug("got event from libvirt, %s %s %s %s" %
(dom.name(), LibvirtEventManager.event_to_string(event), detail, opaque))
if not self.enable_auto_extend:
return
def check_lv(file, vm, device):
logger.debug("sblk max actual size factor %s" % MAX_ACTUAL_SIZE_FACTOR)
            virtual_size, image_offset, _ = vm.domain.blockInfo(device)
            lv_size = int(lvm.get_lv_size(file))
            # image_offset = int(bash.bash_o("qemu-img check %s | grep 'Image end offset' | awk -F ': ' '{print $2}'" % file).strip())
            # virtual_size = int(linux.qcow2_virtualsize(file))
            return int(lv_size) < int(virtual_size) * MAX_ACTUAL_SIZE_FACTOR, image_offset, lv_size, virtual_size
@bash.in_bash
def extend_lv(event_str, path, vm, device):
# type: (str, str, Vm, object) -> object
            r, image_offset, lv_size, virtual_size = check_lv(path, vm, device)
            logger.debug("lv %s image offset: %s, lv size: %s, virtual size: %s" %
                         (path, image_offset, lv_size, virtual_size))
if not r:
logger.debug("lv %s is larager than virtual size * %s, skip extend for event %s" % (path, MAX_ACTUAL_SIZE_FACTOR, event_str))
return
extend_size = lv_size + self.auto_extend_size
try:
lvm.resize_lv(path, extend_size)
except Exception as e:
logger.warn("extend lv[%s] to size[%s] failed" % (path, extend_size))
if "incompatible mode" not in e.message.lower():
return
try:
with lvm.OperateLv(path, shared=False, delete_when_exception=False):
lvm.resize_lv(path, extend_size)
except Exception as e:
logger.warn("extend lv[%s] to size[%s] with operate failed" % (path, extend_size))
else:
logger.debug("lv %s extend to %s sucess" % (path, extend_size))
def get_path_by_device(device_name, vm):
for dev in vm.domain_xmlobject.devices.disk:
if dev.get_child_node("target").dev_ == device_name:
return dev.get_child_node("source").file_
@thread.AsyncThread
@lock.lock("sharedblock-extend-vm-%s" % dom.name())
def handle_event(dom, event_str):
# type: (libvirt.virDomain, str) -> object
vm_uuid = dom.name()
syslog.syslog("got suspend event from libvirt, %s %s %s" %
(vm_uuid, event_str, LibvirtEventManager.suspend_event_to_string(detail)))
disk_errors = dom.diskErrors() # type: dict
vm = get_vm_by_uuid_no_retry(vm_uuid, False)
if len(disk_errors) == 0:
syslog.syslog("no error in vm %s. skip to check and extend volume" % vm_uuid)
return
fixed = False
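            # extend every LV that reported ENOSPC, then resume the vm if anything was fixed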
try:
for device, error in disk_errors.viewitems():
if error == libvirt.VIR_DOMAIN_DISK_ERROR_NO_SPACE:
path = get_path_by_device(device, vm)
syslog.syslog("disk %s:%s of vm %s got ENOSPC" % (device, path, vm_uuid))
if not lvm.lv_exists(path):
continue
extend_lv(event_str, path, vm, device)
fixed = True
except Exception as e:
syslog.syslog(str(e))
if fixed:
syslog.syslog("resume vm %s" % vm_uuid)
vm.resume()
touchQmpSocketWhenExists(vm_uuid)
event_str = LibvirtEventManager.event_to_string(event)
if event_str not in (LibvirtEventManager.EVENT_SUSPENDED,):
return
handle_event(dom, event_str)
def _clean_colo_heartbeat(self, conn, dom, event, detail, opaque):
event_str = LibvirtEventManager.event_to_string(event)
if event_str not in (LibvirtEventManager.EVENT_SHUTDOWN, LibvirtEventManager.EVENT_STOPPED):
return
vm_uuid = dom.name()
heartbeat_thread = self.vm_heartbeat.pop(vm_uuid, None)
if heartbeat_thread and heartbeat_thread.is_alive():
logger.debug("clean vm[uuid:%s] heartbeat, due to evnet %s" % (dom.name(), LibvirtEventManager.event_to_string(event)))
heartbeat_thread.do_heart_beat = False
heartbeat_thread.join()
@bash.in_bash
def _release_sharedblocks(self, conn, dom, event, detail, opaque):
logger.debug("got event from libvirt, %s %s" % (dom.name(), LibvirtEventManager.event_to_string(event)))
@linux.retry(times=5, sleep_time=1)
def wait_volume_unused(volume):
used_process = linux.linux_lsof(volume)
if len(used_process) != 0:
raise RetryException("volume %s still used: %s" % (volume, used_process))
@thread.AsyncThread
@bash.in_bash
def deactivate_colo_cache_volume(event_str, path, vm_uuid):
try:
wait_volume_unused(path)
finally:
used_process = linux.linux_lsof(path)
if len(used_process) == 0:
mount_path = path.rsplit('/',1)[0].replace("'", '')
sblk_volume_path = linux.get_mount_url(mount_path)
linux.umount(mount_path)
linux.rm_dir_force(mount_path)
if not sblk_volume_path:
syslog.syslog("vm: %s: no mount url found for %s" % (vm_uuid, mount_path))
try:
lvm.deactive_lv(sblk_volume_path, False)
syslog.syslog(
"deactivated volume %s for event %s happend on vm %s" % (
sblk_volume_path, event_str, vm_uuid))
except Exception as e:
logger.debug("deactivate volume %s for event %s happend on vm %s failed, %s" % (
sblk_volume_path, event_str, vm_uuid, str(e)))
else:
syslog.syslog("vm: %s, volume %s still used: %s, skip to deactivate" % (vm_uuid, path, used_process))
@thread.AsyncThread
@bash.in_bash
def deactivate_volume(event_str, file, vm_uuid):
# type: (str, str, str) -> object
volume = file.strip().split("'")[1]
syslog.syslog("deactivating volume %s for vm %s" % (file, vm_uuid))
lock_type = bash.bash_o("lvs --noheading --nolocking %s -ovg_lock_type" % volume).strip()
if "sanlock" not in lock_type:
syslog.syslog("%s has no sanlock, skip to deactive" % file)
return
try:
wait_volume_unused(volume)
finally:
used_process = linux.linux_lsof(volume)
if len(used_process) == 0:
try:
lvm.deactive_lv(volume, False)
syslog.syslog(
"deactivated volume %s for event %s happend on vm %s success" % (volume, event_str, vm_uuid))
except Exception as e:
syslog.syslog("deactivate volume %s for event %s happend on vm %s failed, %s" % (
volume, event_str, vm_uuid, str(e)))
else:
syslog.syslog("vm: %s, volume %s still used: %s, skip to deactivate" % (vm_uuid, volume, used_process))
try:
event_str = LibvirtEventManager.event_to_string(event)
if event_str not in (LibvirtEventManager.EVENT_SHUTDOWN, LibvirtEventManager.EVENT_STOPPED):
return
vm_uuid = dom.name()
vm_op_judger = self._get_operation(vm_uuid)
if vm_op_judger and event_str in vm_op_judger.ignore_libvirt_events():
logger.info("expected event for zstack op %s, ignore event %s on vm %s" % (vm_op_judger.op, event_str, vm_uuid))
return
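            # sharedblock LVs appear as <source file='/dev/...'> entries in the domain xml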
out = bash.bash_o("virsh dumpxml %s | grep \"source file='/dev/\"" % vm_uuid).strip().splitlines()
if len(out) != 0:
for file in out:
deactivate_volume(event_str, file, vm_uuid)
out = bash.bash_o('virsh dumpxml %s | grep -E "(active|hidden) file="' % vm_uuid).strip().splitlines()
if len(out) != 0:
for cache_config in out:
path = cache_config.split('=')[1].rsplit('/', 1)[0]
deactivate_colo_cache_volume(event_str, path, vm_uuid)
else:
logger.debug("can not find sharedblock related volume for vm %s, skip to release" % vm_uuid)
except:
content = traceback.format_exc()
logger.warn("traceback: %s" % content)
def _vm_shutdown_event(self, conn, dom, event, detail, opaque):
try:
event = LibvirtEventManager.event_to_string(event)
if event not in (LibvirtEventManager.EVENT_SHUTDOWN,):
return
vm_uuid = dom.name()
# this is an operation outside zstack, report it
url = self.config.get(kvmagent.SEND_COMMAND_URL)
if not url:
logger.warn('cannot find SEND_COMMAND_URL, unable to report shutdown event of vm[uuid:%s]' % vm_uuid)
return
@thread.AsyncThread
def report_to_management_node():
cmd = ReportVmShutdownEventCmd()
cmd.vmUuid = vm_uuid
syslog.syslog('report shutdown event for vm ' + vm_uuid)
http.json_dump_post(url, cmd, {'commandpath': '/kvm/reportvmshutdown'})
report_to_management_node()
except:
content = traceback.format_exc()
logger.warn("traceback: %s" % content)
def _set_vnc_port_iptable_rule(self, conn, dom, event, detail, opaque):
try:
event = LibvirtEventManager.event_to_string(event)
if event not in (LibvirtEventManager.EVENT_STARTED, LibvirtEventManager.EVENT_STOPPED):
return
vm_uuid = dom.name()
if vm_uuid.startswith("guestfs-"):
logger.debug("[set_vnc_port_iptable]ignore the temp vm[%s] while using guestfish" % vm_uuid)
return
domain_xml = dom.XMLDesc(0)
domain_xmlobject = xmlobject.loads(domain_xml)
if is_namespace_used():
internal_id_node = find_zstack_metadata_node(etree.fromstring(domain_xml), 'internalId')
vm_id = internal_id_node.text if internal_id_node is not None else None
else:
vm_id = domain_xmlobject.metadata.internalId.text_ if xmlobject.has_element(domain_xmlobject, 'metadata.internalId') else None
if not vm_id:
logger.debug('vm[uuid:%s] is not managed by zstack, do not configure the vnc iptables rules' % vm_uuid)
return
vir = VncPortIptableRule()
if LibvirtEventManager.EVENT_STARTED == event:
if is_namespace_used():
host_ip_node = find_zstack_metadata_node(etree.fromstring(domain_xml), 'hostManagementIp')
vir.host_ip = host_ip_node.text
else:
vir.host_ip = domain_xmlobject.metadata.hostManagementIp.text_
if shell.run('ip addr | grep -w %s > /dev/null' % vir.host_ip) != 0:
                    logger.debug('the vm is migrated from another host, we do not need to set the console firewall, as '
                                 'the management node will take care of it')
return
for g in domain_xmlobject.devices.get_child_node_as_list('graphics'):
if g.type_ == 'vnc' or g.type_ == 'spice':
vir.port = g.port_
break
vir.vm_internal_id = vm_id
vir.apply()
logger.debug('Enable [port:%s] in firewall rule for vm[uuid:%s] console' % (vir.port, vm_id))
elif LibvirtEventManager.EVENT_STOPPED == event:
vir.vm_internal_id = vm_id
vir.delete()
logger.debug('Delete firewall rule for vm[uuid:%s] console' % vm_id)
except:
# if vm do live migrate the dom may not be found or the vm has been undefined
vm = get_vm_by_uuid(dom.name(), False)
if not vm:
logger.debug("can not get domain xml of vm[uuid:%s], "
"the vm may be just migrated here or it has already been undefined" % dom.name())
return
content = traceback.format_exc()
logger.warn(content)
def _delete_pushgateway_metric(self, conn, dom, event, detail, opaque):
try:
event = LibvirtEventManager.event_to_string(event)
if event != LibvirtEventManager.EVENT_STOPPED:
return
output = shell.call('ps aux | grep [p]ushgateway')
if '/var/lib/zstack/kvm/pushgateway' not in output:
return
port = None
lines = output.splitlines()
for line in lines:
if '/var/lib/zstack/kvm/pushgateway' in line:
port = line[line.rindex('web.listen-address :') + 20:]
port = port.split()[0]
break
vm_uuid = dom.name()
url = "http://localhost:%s/metrics/job/zwatch_vm_agent/vmUuid/%s" % (port, vm_uuid)
shell.run('curl -X DELETE ' + url)
except Exception as e:
logger.warn("delete pushgateway metric when vm stoped failed: %s" % e.message)
def register_libvirt_event(self):
#LibvirtAutoReconnect.add_libvirt_callback(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._vm_lifecycle_event)
LibvirtAutoReconnect.add_libvirt_callback(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._set_vnc_port_iptable_rule)
LibvirtAutoReconnect.add_libvirt_callback(libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, self._vm_reboot_event)
LibvirtAutoReconnect.add_libvirt_callback(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._vm_shutdown_event)
LibvirtAutoReconnect.add_libvirt_callback(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._release_sharedblocks)
LibvirtAutoReconnect.add_libvirt_callback(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._clean_colo_heartbeat)
LibvirtAutoReconnect.add_libvirt_callback(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._extend_sharedblock)
LibvirtAutoReconnect.add_libvirt_callback(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._delete_pushgateway_metric)
LibvirtAutoReconnect.register_libvirt_callbacks()
def register_qemu_log_cleaner(self):
def pick_uuid_from_filename(filename):
pattern = r'^([0-9a-f]{32})\.log'
matcher = re.match(pattern, filename)
if matcher:
return matcher.group(1) # return uuid
else:
return None
def qemu_log_cleaner():
            logger.debug('Clean libvirt log task started')
try:
log_paths = linux.listPath('/var/log/libvirt/qemu/')
all_active_vm_uuids = set(get_all_vm_states())
# log life : 180 days
clean_time = datetime.datetime.now() - datetime.timedelta(days=180)
for p in log_paths:
filename = os.path.basename(p)
uuid = pick_uuid_from_filename(filename)
if uuid and uuid in all_active_vm_uuids:
# vm exists
continue
try:
modify_time = datetime.datetime.fromtimestamp(os.stat(p).st_mtime)
if modify_time < clean_time:
linux.rm_file_force(p)
except Exception as ex_inner:
                        logger.warn('Failed to clean libvirt log file `%s` because: %s' % (p, str(ex_inner)))
            except Exception as ex_outer:
                logger.warn('Failed to clean libvirt log files because: %s' % str(ex_outer))
# run cleaner : once a day
thread.timer(24 * 3600, qemu_log_cleaner).start()
# first time
thread.timer(60, qemu_log_cleaner).start()
def clean_old_sshfs_mount_points(self):
mpts = shell.call("mount -t fuse.sshfs | awk '{print $3}'").splitlines()
for mpt in mpts:
if mpt.startswith(tempfile.gettempdir()):
pids = linux.get_pids_by_process_fullname(mpt)
for pid in pids:
linux.kill_process(pid, is_exception=False)
linux.fumount(mpt, 2)
def stop(self):
self.clean_old_sshfs_mount_points()
def configure(self, config):
self.config = config
class EmptyCdromConfig():
def __init__(self, targetDev, bus, unit):
self.targetDev = targetDev
self.bus = bus
self.unit = unit
class VolumeIDEConfig():
def __init__(self, bus, unit):
self.bus = bus
self.unit = unit
class ColoReplicationConfig():
def __init__(self, alias_name, replication_id):
self.alias_name = alias_name
self.replication_id = replication_id
class VolumeSnapshotJobStruct(object):
def __init__(self, volumeUuid, volume, installPath, vmInstanceUuid, previousInstallPath,
newVolumeInstallPath, live=True, full=False, memory=False):
self.volumeUuid = volumeUuid
self.volume = volume
self.installPath = installPath
self.vmInstanceUuid = vmInstanceUuid
self.previousInstallPath = previousInstallPath
self.newVolumeInstallPath = newVolumeInstallPath
self.memory = memory
self.live = live
self.full = full
class VolumeSnapshotResultStruct(object):
def __init__(self, volumeUuid, previousInstallPath, installPath, size=None):
"""
:type volumeUuid: str
:type size: long
:type installPath: str
:type previousInstallPath: str
"""
self.volumeUuid = volumeUuid
self.previousInstallPath = previousInstallPath
self.installPath = installPath
self.size = size
@bash.in_bash
@misc.ignoreerror
def touchQmpSocketWhenExists(vmUuid):
if vmUuid is None:
return
path = "%s/%s.sock" % (QMP_SOCKET_PATH, vmUuid)
if os.path.exists(path):
bash.bash_roe("touch %s" % path)
| 43.205351 | 382 | 0.575304 | 318,309 | 0.956923 | 0 | 0 | 181,884 | 0.546793 | 0 | 0 | 65,348 | 0.196454 |
ba0f9f2c8594abc6e459db5569577285c57e27dd | 4,484 | py | Python | dics_megset.py | larsoner/beamformer_simulation | ebc9cfc8bc73434ecd995c3b85560db962642307 | [
"BSD-3-Clause"
] | null | null | null | dics_megset.py | larsoner/beamformer_simulation | ebc9cfc8bc73434ecd995c3b85560db962642307 | [
"BSD-3-Clause"
] | null | null | null | dics_megset.py | larsoner/beamformer_simulation | ebc9cfc8bc73434ecd995c3b85560db962642307 | [
"BSD-3-Clause"
] | null | null | null | import mne
import numpy as np
import pandas as pd
from mne.beamformer import make_dics, apply_dics_csd
from config import dics_settings, fname, args
from megset.config import fname as megset_fname
from megset.config import freq_range
subject = args.subject
print(f'Running analysis for subject {subject}')
mne.set_log_level(False) # Shhh
###############################################################################
# Load the data
###############################################################################
epochs = mne.read_epochs(megset_fname.epochs_long(subject=subject))
fwd = mne.read_forward_solution(megset_fname.fwd(subject=subject))
dip = mne.read_dipole(megset_fname.ecd(subject=subject))
###############################################################################
# Sensor-level analysis for beamformer
###############################################################################
epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)
# Make csd matrices
freqs = np.arange(*freq_range[subject])
csd = mne.time_frequency.csd_morlet(epochs, freqs, tmin=-0.8, tmax=1.0, decim=5)
csd_baseline = mne.time_frequency.csd_morlet(epochs, freqs, tmin=-0.8, tmax=0, decim=5)
# ERS window: 0.2-1.0 s after stimulus onset (ERS typically emerges around 0.5 s)
csd_ers = mne.time_frequency.csd_morlet(epochs, freqs, tmin=0.2, tmax=1.0, decim=5)
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()
###############################################################################
# Compute dics solution and plot stc at dipole location
###############################################################################
dists = []
focs = []
ori_errors = []
for setting in dics_settings:
reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, real_filter, use_noise_cov, reduce_rank = setting
try:
if sensor_type == 'grad':
info = epochs_grad.info
elif sensor_type == 'mag':
info = epochs_mag.info
elif sensor_type == 'joint':
info = epochs_joint.info
else:
            raise ValueError('Invalid sensor type: %s' % sensor_type)
info_eq, fwd_eq, csd_eq = mne.channels.equalize_channels([info, fwd, csd])
filters = make_dics(info_eq, fwd_eq, csd_eq, reg=reg, pick_ori=pick_ori,
inversion=inversion, weight_norm=weight_norm,
noise_csd=csd_baseline if use_noise_cov else None,
normalize_fwd=normalize_fwd,
real_filter=real_filter, reduce_rank=reduce_rank)
# Compute source power
stc_baseline, _ = apply_dics_csd(csd_baseline, filters)
stc_power, _ = apply_dics_csd(csd_ers, filters)
# Normalize with baseline power.
stc_power /= stc_baseline
stc_power.data = np.log(stc_power.data)
peak_vertex, _ = stc_power.get_peak(vert_as_index=True)
# Compute distance between true and estimated source locations
pos = fwd['source_rr'][peak_vertex]
dist = np.linalg.norm(dip.pos - pos)
# Ratio between estimated peak activity and all estimated activity.
focality_score = stc_power.data[peak_vertex, 0] / stc_power.data.sum()
if pick_ori == 'max-power':
estimated_ori = filters['max_power_oris'][0][peak_vertex]
ori_error = np.rad2deg(np.arccos(estimated_ori @ dip.ori[0]))
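            # dipole orientation sign is arbitrary, so fold errors past 90 degrees back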
if ori_error > 90:
ori_error = 180 - ori_error
else:
ori_error = np.nan
except Exception as e:
print(e)
dist = np.nan
focality_score = np.nan
ori_error = np.nan
print(setting, dist, focality_score, ori_error)
dists.append(dist)
focs.append(focality_score)
ori_errors.append(ori_error)
###############################################################################
# Save everything to a pandas dataframe
###############################################################################
df = pd.DataFrame(dics_settings,
columns=['reg', 'sensor_type', 'pick_ori', 'inversion',
'weight_norm', 'normalize_fwd', 'real_filter',
'use_noise_cov', 'reduce_rank'])
df['dist'] = dists
df['focality'] = focs
df['ori_error'] = ori_errors
df.to_csv(fname.dics_megset_results(subject=subject))
print('OK!')
| 38 | 120 | 0.573372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,316 | 0.293488 |
ba0ff3598421ac2513f7f174a00699fe46db7b6a | 23,444 | py | Python | analyze_clusters.py | sopeeweje/NLP-AI-Diagnosis | a5d7a590c98aad87f87cf75a7d25f3ca4a51d4be | [
"MIT"
] | null | null | null | analyze_clusters.py | sopeeweje/NLP-AI-Diagnosis | a5d7a590c98aad87f87cf75a7d25f3ca4a51d4be | [
"MIT"
] | 12 | 2021-09-17T23:09:02.000Z | 2021-12-29T22:37:03.000Z | analyze_clusters.py | sopeeweje/NLP-AI-Diagnosis | a5d7a590c98aad87f87cf75a7d25f3ca4a51d4be | [
"MIT"
] | 1 | 2021-09-18T19:38:58.000Z | 2021-09-18T19:38:58.000Z | import csv
from sklearn.cluster import MiniBatchKMeans
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pickle
import numpy as np
import sklearn.metrics as metrics
from yellowbrick.cluster import InterclusterDistance
from scipy.optimize import curve_fit
import umap.umap_ as umap
from colorsys import hls_to_rgb
from pylab import *
from datetime import datetime
import os
import argparse
import scipy.stats as scist
from docx import Document
from feature_extraction import LemmaStemmerTokenizer
# Allow for larger CSV files
maxInt = sys.maxsize
while True:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
try:
csv.field_size_limit(maxInt)
break
except OverflowError:
maxInt = int(maxInt/10)
def get_clusters(selected_k, data_file, processed_file, centers, years, save_folder="", save=True):
"""
Parameters
----------
selected_k : selected number of clusters
data_file : pickle with raw data as list of dictionaries
processed_file : pickle with transformed data as array
centers : array. initial centroids from LDA. Can be initialized as 'k-means++'
years : list of strings. years for intracluster analysis
save_folder : string. directory to save result, the default is "".
save : boolean
Returns
-------
output : dictionary. Keys:
"yr_avg_cost": List of lists. Average funding by year for each cluster.
"yr_total_cost": List of lists. Total funding by year for each cluster.
"size": List. Size of each cluster.
"data_by_cluster": List of lists of dictionaries. Points in each cluster: [ [{Cluster1pt1}, {Cluster1pt2},...], [{Cluster2pt1}, {Cluster2pt2},...], ...]
"centroids": 10 x K array of cluster centroids,
"score": List. Silhouette score by cluster
"model": MiniBatchKMeans model
"labels": Cluster labels of data points (ordered)
"""
# Load data as list of dictionaries
data = pickle.load(open(data_file,"rb"))
# Transformed data
X_transformed = pickle.load(open(processed_file,"rb"))
# Perform mini batch k means
km = MiniBatchKMeans(n_clusters=selected_k, init=centers, verbose=0, max_no_improvement=None)
clusters = km.fit_predict(X_transformed)
scores = metrics.silhouette_samples(X_transformed, clusters)
# Output data
cluster_all = []
costs = []
yoy = []
size = []
    MECH_NAMES = ("R01", "U01", "R44", "U24", "R21", "U54")
    mechanisms = [[] for _ in MECH_NAMES]  # one per-cluster fraction list per mechanism
for i in range(0,selected_k):
# indices of cluster k
cluster = [idx for idx, element in enumerate(clusters) if element == i]
# get points
cluster_data = [data[ind] for ind in cluster]
        cluster_scores = [scores[ind] for ind in cluster]
        for idx in range(len(cluster_data)):
            cluster_data[idx]["score"] = cluster_scores[idx]
cluster_all.append(cluster_data)
# calculate average cost and std
try:
average_cost = sum([item["award_amount"] for item in cluster_data])/len(cluster_data)
except:
average_cost = 0
costs.append(average_cost)
cost_trend = []
for year in years:
year_data = [data[ind]["award_amount"] for ind in cluster if data[ind]["year"] == year]
if len(year_data) == 0:
cost_trend.append(0)
else:
year_cost = sum(year_data) # /len(year_data)
cost_trend.append(year_cost)
yoy.append(cost_trend)
size.append(len(cluster))
# get number of awards per mechanism
if len(cluster_data) != 0:
for j in range(len(mechanisms)):
mech = len([ind for ind in cluster if data[ind]["mechanism"] == MECH_NAMES[j]])/len(cluster_data)
mechanisms[j].append(mech)
else:
for j in range(len(mechanisms)):
mechanisms[j].append(0)
# Get centroids
# Identify the top terms for each cluster, using the TF-IDF terms with the highest values in the centroid
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
terms = vectorizer.get_feature_names_out()
centroids = []
for i in range(selected_k):
centroid_list = []
for ind in order_centroids[i, :15]:
centroid_list.append(terms[ind])
centroids.append(centroid_list)
# Save centroids
if save:
centroid_file = open("{}/centroids".format(save_folder), "w", encoding='utf8')
for i in range(selected_k):
centroid_file.write("Cluster %d:" % i)
for ind in order_centroids[i, :15]:
centroid_file.write(" %s" % terms[ind])
centroid_file.write("\n")
centroid_file.close()
# get scores
score = metrics.silhouette_score(X_transformed, km.labels_)
output = {
"yr_avg_cost": costs, # Average award size by year by cluster
"yr_total_cost": yoy, # Total award size by year by cluster
"size": size, # Number of awards in each cluster
"data_by_cluster": cluster_all,
"centroids": centroids,
"score": score, # Silhouette score for
"model": km, # K-means model
"labels": clusters, # Ordered list of cluster number labels for each award
"mechanisms": mechanisms # List of lists: [r01, u01, r44, u24, r21, u54]. Each internal list has number of awards per mechanism by cluster
}
return output
def umap_visualization(X_transformed, cluster_labels, silhouette_scores, sizes, save_folder=""):
#outlier_scores = sklearn.neighbors.LocalOutlierFactor(contamination=0.1).fit_predict(X_transformed)
#X_transformed = X_transformed[outlier_scores != -1]
#cluster_labels = cluster_labels[outlier_scores != -1]
# product = [silhouette_scores[i]*sizes[i] for i in range(len(sizes))]
top_clusters = sorted(range(len(silhouette_scores)), key=lambda i: silhouette_scores[i], reverse=True)[:9]
n_subset = len(cluster_labels)
selected_cells = np.random.choice(np.arange(X_transformed.shape[0]), size = n_subset, replace = False)
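    # embed the TF-IDF vectors with UMAP using the Hellinger metric, which suits sparse count-like data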
mapper = umap.UMAP(metric='hellinger', random_state=42).fit(X_transformed[selected_cells,:])
embedding = mapper.transform(X_transformed[selected_cells,:])
# Colors
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:olive', 'tab:cyan']
selected_colors = []
for point in selected_cells:
if cluster_labels[point] in top_clusters:
selected_colors.append(colors[top_clusters.index(cluster_labels[point])])
else:
selected_colors.append('tab:gray')
# Plot Clusters on UMAP
plt.figure()
plt.grid(visible=None)
plt.scatter(embedding[:, 0], embedding[:, 1], cmap='Spectral', s=5, c=selected_colors)
plt.gca().set_aspect('equal', 'datalim')
num_clust = len(np.unique(cluster_labels[selected_cells]))
#plt.colorbar(boundaries=np.arange(num_clust+1)-0.5).set_ticks(np.arange(num_clust))
plt.title('UMAP Projection of Awards, TF-IDF', fontsize=14)
plt.xlabel("UMAP 1")
plt.ylabel("UMAP 2")
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
plt.savefig('{}/umap.png'.format(save_folder))
def rainbow_color_stops(n=10, end=1, shade=0.9):
return [ hls_to_rgb(end * i/(n-1)*shade, 0.5*shade, 1*shade) for i in range(n) ]
def get_funding_projections(data):
# 1. Determine dimensions for plot
k = len(data["size"])
factors = []
for i in range(1, k+1):
if k / i == i:
factors.extend([i,i])
elif k % i == 0:
factors.append(i)
dim1, dim2 = factors[int(len(factors)/2)], factors[int(len(factors)/2-1)]
# 2. Create plot
fig, axs = plt.subplots(dim1, dim2, sharex='all', sharey='all')
# 3. Create hidden frame for shared labels
fig.add_subplot(111, frameon=False)
plt.grid(visible=None)
plt.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)
plt.xlabel("Years from 1985")
plt.ylabel("Funding ($100 millions)")
# 4. Get projections
years_int = list(range(0,36))
projection = []
growth = []
bounds = []
for i in range(len(data["yr_total_cost"])):
popt, pcov = curve_fit(lambda t,a,b: a*np.exp(b*t), years_int, data["yr_total_cost"][i], p0=(4000, 0.1))
std = np.sqrt(np.diagonal(pcov))
        x = np.linspace(0, 36, 400)  # extend one step past the data so ypred[-1] lands on 2021 (t=36)
# upper0 = popt[0]+1.96*std[0]
# lower0 = popt[0]-1.96*std[0]
upper1 = popt[1]+1.96*std[1]
lower1 = popt[1]-1.96*std[1]
ypred = [popt[0]*np.exp(popt[1]*point) for point in x] #-popt[0]
projection.append(ypred[-1])
growth.append(popt[1])
bounds.append([lower1, upper1])
# projection.append(0)
# growth.append(0)
# bounds.append([0,0])
# 5. Return 2021 projections and growth rate
return projection, growth, bounds
def viz_centroids(data):
model = data["model"]
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
plt.figure()
visualizer = InterclusterDistance(model, random_state=0)
visualizer.fit(X_transformed) # Fit the data to the visualizer
visualizer.show() # Finalize and render the figure
def predict_clusters(test_data, selected_k, model):
test_data = pickle.load(open(test_data,"rb"))
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
input_text = [item["text"] for item in test_data]
if len(input_text) == 0:
return [0 for i in range(0,selected_k)], 0
test_transformed = vectorizer.transform(input_text)
years = [str(i) for i in range(1985,2021)]
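    # assign each held-out award to its nearest trained cluster centroid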
labels = model.predict(test_transformed)
# Output data
cluster_all = []
costs = []
yoy = []
size = []
for i in range(0,selected_k):
# indices of cluster k
cluster = [idx for idx, element in enumerate(labels) if element == i]
# get points
cluster_data = [test_data[ind] for ind in cluster]
cluster_all.append(cluster_data)
# calculate average cost and std
try:
average_cost = sum([item["award_amount"] for item in cluster_data])/len(cluster_data)
except:
average_cost = 0
costs.append(average_cost)
cost_trend = []
for year in years:
year_data = [test_data[ind]["award_amount"] for ind in cluster if test_data[ind]["year"] == year]
if len(year_data) == 0:
cost_trend.append(0)
else:
year_cost = sum(year_data)
cost_trend.append(year_cost)
yoy.append(cost_trend)
size.append(len(cluster))
return cluster_all, size
def get_best_cluster(selected_k, num_trials, centers, years, save_folder="", save=True):
scores = []
results = {}
print("Optimizing model...")
for i in range(num_trials):
# Generate clusters for a selected k
data = get_clusters(selected_k, "data/data.pkl", "data/processed-data.pkl", 'k-means++', years, save_folder, save=save)
j = 0
for thing in data["data_by_cluster"]:
for item in thing:
try:
results[item["id"]].append(centroids[j])
except:
results[item["id"]] = [item["id"],item["title"],item["award_amount"],data["centroids"][j]]
j+=1
print("Trial {}: Score = {:.3f}".format(str(i+1), data["score"]))
scores.append(data["score"])
if data["score"] >= max(scores):
chosen = data
return chosen, scores
def get_citations(clusters):
"""
Parameters
----------
clusters : nested lists of dictionaries representing each award in a cluster.
Returns
-------
    total_citations : list of total citations by cluster
    total_papers : list of total papers by cluster
    apts_95 : list of fractions of papers with APT equal to 0.95, by cluster
    apts : list of average APT by cluster, e.g. [0.9, ...]
    lower : list of lower bounds of the 95% CI of the average APT, e.g. [0.85, ...]
    upper : list of upper bounds of the 95% CI of the average APT, e.g. [0.95, ...]
    total_availability : list of total paper-years of citation availability by cluster
"""
# Get clusters by project number
clusters_by_project = []
for cluster in clusters:
cluster = [item["project_number"] for item in cluster]
cluster = list(set(cluster)) # Remove duplicates
clusters_by_project.append(cluster)
# Get number of citations, apt, and publication year by paper
output = {}
with open("data/citations.csv", newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for i in range(1,len(raw_data)): # "rcr": float(raw_data[i][6]),
output[raw_data[i][0]] = {
"citations": int(raw_data[i][13]),
"apt": float(raw_data[i][11]),
"year": int(raw_data[i][1])}
# Get project number and year by paper
with open("data/publications.csv", newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for i in range(1,len(raw_data)):
if raw_data[i][1] in output.keys():
output[raw_data[i][1]]["project"] = raw_data[i][0]
# Calculate total number of citations, total number of papers, average RCR, average APT for each cluster
total_citations = []
total_papers = []
apts = []
apts_95 = []
lower = []
upper = []
total_availability = []
# rcrs = []
for cluster in clusters_by_project:
cluster_citations = []
# cluster_rcr = []
cluster_apt = []
num_papers = 0
availability = []
for idd in cluster:
papers = [output[key]["citations"] for key in output if output[key]["project"]==idd] # list of all papers associated with cluster by citation count
# rcr = [output[key]["rcr"] for key in output if output[key]["project"]==idd]
apt = [output[key]["apt"] for key in output if output[key]["project"]==idd]
avail_years = [max(0, 2021-output[key]["year"]) for key in output if output[key]["project"]==idd]
# cluster_rcr.extend(rcr)
cluster_apt.extend(apt)
num_papers += len(papers)
cluster_citations.append(sum(papers))
availability.append(sum(avail_years))
total_citations.append(sum(cluster_citations))
total_papers.append(num_papers)
apts_95.append(sum([1 for i in cluster_apt if i==0.95])/len(cluster_apt))
apts.append(np.mean(cluster_apt))
        # create a 95% confidence interval for the mean APT of the cluster
apts_interval = scist.norm.interval(alpha=0.95, loc=np.mean(cluster_apt), scale=scist.sem(cluster_apt))
lower.append(apts_interval[0])
upper.append(apts_interval[1])
# rcrs.append(sum(cluster_apt)/len(cluster_apt))
total_availability.append(int(sum(availability)))
return total_citations, total_papers, apts_95, apts, lower, upper, total_availability
def get_rep_clusters(result):
path, dirs, files = next(os.walk('{}/clusters'.format(result)))
file_count = len(files)
if file_count == 0:
return
document = Document()
for i in range(file_count):
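        # deduplicate awards by title, keeping the most recently funded instance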
unique_awards = {}
# open file
with open('{}/clusters/cluster-{}.csv'.format(result, str(i)), newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for j in range(1,len(raw_data)):
title = raw_data[j][1]
organization = raw_data[j][6]
mechanism = raw_data[j][7]
year = int(raw_data[j][8])
score = float(raw_data[j][11])
# If this is a new title
if title not in unique_awards:
unique_awards[title] = {
"organization": organization,
"activity": mechanism,
"year": year,
"score": score,
}
# If the title is already there
else:
current_year = unique_awards[title]["year"]
# Use the most recent one
if year > current_year:
unique_awards[title] = {
"organization": organization,
"activity": mechanism,
"year": year,
"score": score,
}
unique_awards_sorted = dict(sorted(unique_awards.items(), key = lambda item: -item[1]["score"]))
unique_awards_list = list(unique_awards_sorted.items())[0:5]
p = document.add_paragraph()
p.add_run('Cluster {}:'.format(str(i))).bold = True
table = document.add_table(rows=6, cols=5)
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Title'
hdr_cells[1].text = 'Awardee'
hdr_cells[2].text = 'Award Activity'
hdr_cells[3].text = 'Year'
hdr_cells[4].text = 'Sample Silhouette Score'
for i in range(len(unique_awards_list)):
table.cell(i+1,0).text = unique_awards_list[i][0] # Title
table.cell(i+1,1).text = unique_awards_list[i][1]['organization'] # Awardee
table.cell(i+1,2).text = unique_awards_list[i][1]['activity'] # Award Activity
table.cell(i+1,3).text = str(unique_awards_list[i][1]['year']) # Year
table.cell(i+1,4).text = "{:.2g}".format(unique_awards_list[i][1]['score']) # Sample Silhouette Score
document.add_page_break()
document.save('{}/supp_info.docx'.format(result))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--k',
type=int,
required=True,
help='number of clusters',
default=30,
)
parser.add_argument(
'--trials',
type=int,
required=True,
help='number of trials',
default=50,
)
FLAGS, unparsed = parser.parse_known_args()
years = [str(i) for i in range(1985,2021)]
selected_k = FLAGS.k
num_trials = FLAGS.trials
centers = 'k-means++'
# Create folder to save results
now = datetime.now()
save_folder = "results/"+now.strftime("%m-%d-%Y--%H%M%S")
os.mkdir(save_folder)
# Get best clustering
data, scores = get_best_cluster(selected_k, num_trials, centers, years, save_folder)
with open("{}/model_clustering.pkl".format(save_folder), 'wb') as handle:
pickle.dump(data, handle)
# Final cluster files
num = 0
os.mkdir(save_folder+"/clusters")
for cluster in data["data_by_cluster"]:
if cluster == []:
continue
keys = cluster[0].keys()
with open('{}/clusters/cluster-{}.csv'.format(save_folder,str(num)), 'w', newline='', encoding='utf8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(cluster)
num+=1
# Silhouette score by cluster
print("")
print("------Silhouette scores------")
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
scores = metrics.silhouette_samples(X_transformed, data["labels"])
tabulated = []
pairs = [(scores[i],data["labels"][i]) for i in range(len(scores))]
for i in range(selected_k):
avg_score = np.mean([j[0] for j in pairs if j[1] == i])
print("Cluster {}: {}".format(str(i), str(avg_score)))
tabulated.append(avg_score)
print("----------------------------")
print("")
# Final centroids
order_centroids = data["model"].cluster_centers_.argsort()[:, ::-1]
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
terms = vectorizer.get_feature_names_out()
centroids = []
centroid_file = open("{}/centroids".format(save_folder), "w", encoding='utf8')
for i in range(selected_k):
centroid_file.write("Cluster %d:" % i)
centroid_list = []
for ind in order_centroids[i, :15]:
centroid_file.write(" %s," % terms[ind])
centroid_list.append(terms[ind])
centroids.append(centroid_list)
centroid_file.write("\n")
centroid_file.close()
# UMAP Visualization
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
umap_visualization(X_transformed, data["labels"], tabulated, data["size"], save_folder)
# Get 2021 projections, projected growth rates, and confidence bounds on growth rates by cluster
projection, growth, bounds = get_funding_projections(data) # 2021 prediction
# Get 2021 clusters
model = data["model"]
clusters_test, size_test = predict_clusters("data/test-data.pkl", selected_k, model)
x = np.arange(selected_k)
if size_test == 0:
cluster_cost_2021 = [0 for i in range(0, selected_k)]
else:
cluster_cost_2021 = [(sum([item["award_amount"] for item in group]) if len(group) > 0 else 0) for group in clusters_test]
# Save 2021 clusters
num = 0
os.mkdir("{}/clusters_test".format(save_folder))
for cluster in clusters_test:
try:
keys = cluster[0].keys()
except:
num+=1
continue
with open('{}/clusters_test/cluster-{}.csv'.format(save_folder,str(num)), 'w', newline='', encoding='utf8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(cluster)
num+=1
# Citations and papers
citations, papers, apt_pct, apt, lower, upper, availability = get_citations(data["data_by_cluster"])
# Total funding
total_cluster_funding = [sum([item["award_amount"] for item in group]) for group in data["data_by_cluster"]]
# Get representative clusters for supp info
get_rep_clusters(save_folder)
# All data - note blank columns for description, category
output = [["Cluster", "Size", "Total", "Citations", "APT % over 95%", "Avg. APT", "95%CI L", "95%CI U", "Papers", "Citations per $1mil funding", "Years of Availability", "Citations per thousand dollars of funding per year", "Projected 2021 Award", "Actual 2021 Award To Date", "Growth Rate", "95%CI L", "95%CI U", "Score", "Description", "Category", "Clinical/Technical", "Centroids", "%R01", "%U01", "%R44", "%U24", "%R21", "%U54"]]
for i in range(selected_k):
output.append([i, data["size"][i], total_cluster_funding[i], citations[i], apt_pct[i], apt[i], lower[i], upper[i], papers[i], citations[i]/total_cluster_funding[i]*1e6, availability[i], citations[i]/total_cluster_funding[i]*1e3/availability[i], projection[i], cluster_cost_2021[i], growth[i], bounds[i][0], bounds[i][1], tabulated[i], " ", " ", " ", centroids[i], data["mechanisms"][0][i], data["mechanisms"][1][i], data["mechanisms"][2][i], data["mechanisms"][3][i], data["mechanisms"][4][i], data["mechanisms"][5][i]])
with open('{}/final_data.csv'.format(save_folder), 'w', newline='', encoding='utf8') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(output)
print("Complete.")
| 39.138564 | 528 | 0.617088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,722 | 0.286726 |
ba10c2347d43dfcd19f5c723cff12b3417b9fd2d | 1,528 | py | Python | seeq/addons/correlation/_utils/_permissions.py | seeq12/seeq-correlation | ab2ed13871352dc3671f5d5df09ec3aebd5f24f5 | [
"Apache-2.0"
] | 2 | 2021-11-17T00:17:43.000Z | 2022-01-25T21:15:04.000Z | seeq/addons/correlation/_utils/_permissions.py | seeq12/seeq-correlation | ab2ed13871352dc3671f5d5df09ec3aebd5f24f5 | [
"Apache-2.0"
] | 5 | 2021-11-02T23:13:57.000Z | 2022-02-14T21:30:32.000Z | seeq/addons/correlation/_utils/_permissions.py | seeq12/seeq-correlation | ab2ed13871352dc3671f5d5df09ec3aebd5f24f5 | [
"Apache-2.0"
] | null | null | null | from seeq.sdk.rest import ApiException
from . import print_red
def permissions_defaults(permissions_group: list, permissions_users: list):
if permissions_group is None:
permissions_group = ['Everyone']
if permissions_users is None:
permissions_users = []
return permissions_group, permissions_users
def add_datalab_project_ace(data_lab_project_id, ace_input, items_api):
if data_lab_project_id:
try:
items_api.add_access_control_entry(id=data_lab_project_id, body=ace_input)
except Exception as error:
print_red(error.body)
def get_user_group(group_name, user_groups_api):
try:
group = user_groups_api.get_user_groups(name_search=group_name)
assert len(group.items) != 0, 'No group named "%s" was found' % group_name
assert len(group.items) == 1, 'More that one group named "%s" was found' % group_name
return group
except AssertionError as error:
print_red(error)
except ApiException as error:
print_red(error.body)
def get_user(user_name, users_api):
try:
user_ = users_api.get_users(username_search=user_name)
if len(user_.users) == 0:
raise ValueError(f'No user named {user_name} was found')
if len(user_.users) > 1:
raise ValueError(f'More than one user named {user_name} was found')
return user_
    except ValueError as error:  # raised by the length checks above
        print_red(error)
except ApiException as error:
print_red(error.body)
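
# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch, assuming the seeq SDK exposes `ApiClient`, `UsersApi` and
# `UserGroupsApi` under these names and that authentication is configured
# elsewhere; verify against your SDK version before relying on it.
if __name__ == '__main__':
    from seeq import sdk  # assumed import path for the generated SDK clients
    client = sdk.ApiClient()  # assumption: an authenticated client is built externally
    group = get_user_group('Everyone', sdk.UserGroupsApi(client))
    user = get_user('agent_api_key', sdk.UsersApi(client))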
| 33.217391 | 93 | 0.689136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.111257 |
ba13957d1dfe8a9dad60a0b98fdc76bc2c051de1 | 2,133 | py | Python | zignet/api.py | Rabbit-Development/Zignet | 341ac2a5e411c385898a060bb41a825e1142625e | [
"MIT"
] | null | null | null | zignet/api.py | Rabbit-Development/Zignet | 341ac2a5e411c385898a060bb41a825e1142625e | [
"MIT"
] | null | null | null | zignet/api.py | Rabbit-Development/Zignet | 341ac2a5e411c385898a060bb41a825e1142625e | [
"MIT"
] | null | null | null | from zignet import controller, db
from zignet.models import *
from flask import request, abort, make_response
@controller.route('/login', methods=['POST'])
def login():
    # Flask's request object is not a dict: form fields live in request.form.
    username = request.form.get('username')
    password = request.form.get('password')
    Rfid = request.form.get('Rfid')
    pincode = request.form.get('pincode')
    if username and password:
        user = db.session.query(User).filter(User.username == username).first()
        if user and user.verify_password(password):
            return make_response('true')
        abort(401)
    elif Rfid and pincode:
        user = db.session.query(User).filter(User.Rfid == Rfid).first()
        if user and user.verify_pincode(pincode):
            return make_response('true')
        abort(401)
    else:
        abort(400)
@controller.route('/create_user', methods=['POST'])
def create_user():
    username = request.form.get('username')
    password = request.form.get('password')
    email = request.form.get('email')
    user = User(email=email, password=password, username=username)
    db.session.add(user)
    db.session.commit()
    return make_response('user created')
@controller.route('/update_user', methods=['POST'])
def update_user():
    username = request.form.get('username')
    password = request.form.get('password')
    email = request.form.get('email')
    pincode = request.form.get('pincode')
    first_name = request.form.get('first_name')
    last_name = request.form.get('last_name')
    phone = request.form.get('phone')
    address = request.form.get('address')
    zip_code = request.form.get('zip_code')
    country = request.form.get('country')
    user = db.session.query(User).filter(User.username == username).first()
    if user and user.verify_password(password):
        if email:
            user.email = email
        if username:
            user.username = username
        if password:
            user.password = password
        if pincode:
            user.pincode = pincode
        if first_name:
            user.first_name = first_name
        if last_name:
            user.last_name = last_name
        if phone:
            user.phone = phone
        if address:
            user.address = address
        if zip_code:
            user.zip_code = zip_code
        if country:
            user.country = country
        db.session.commit()  # persist the updates; without this they are lost
        return make_response('user updated')
    else:
        abort(401)
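
# --- Illustrative smoke test (not part of the original module) ---------------
# A minimal sketch, assuming `controller` is the Flask application object built
# in zignet/__init__.py (if it is a Blueprint instead, use the app that
# registers it). Field names mirror the handlers above.
if __name__ == '__main__':
    with controller.test_client() as client:
        resp = client.post('/login', data={'username': 'alice', 'password': 'secret'})
        print(resp.status_code, resp.data)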
| 22.935484 | 73 | 0.69339 | 0 | 0 | 0 | 0 | 2,015 | 0.944679 | 0 | 0 | 250 | 0.117206 |
ba15381f8ef090f523cd0dca0d47f9f990a3ed66 | 1,853 | py | Python | observatory-platform/observatory/platform/utils/proc_utils.py | metasj/observatory-platform | e684f9dc55a1a0fde1b27193eda10532198c4168 | [
"Apache-2.0"
] | null | null | null | observatory-platform/observatory/platform/utils/proc_utils.py | metasj/observatory-platform | e684f9dc55a1a0fde1b27193eda10532198c4168 | [
"Apache-2.0"
] | null | null | null | observatory-platform/observatory/platform/utils/proc_utils.py | metasj/observatory-platform | e684f9dc55a1a0fde1b27193eda10532198c4168 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: James Diprose
from subprocess import Popen
from typing import Tuple
def wait_for_process(proc: Popen) -> Tuple[str, str]:
""" Wait for a process to finish, returning the std output and std error streams as strings.
:param proc: the process object.
:return: std output and std error streams as strings.
"""
output, error = proc.communicate()
output = output.decode('utf-8')
error = error.decode('utf-8')
return output, error
def stream_process(proc: Popen, debug: bool) -> Tuple[str, str]:
""" Print output while a process is running, returning the std output and std error streams as strings.
:param proc: the process object.
:param debug: whether debug info should be displayed.
:return: std output and std error streams as strings.
"""
output_concat = ''
error_concat = ''
while True:
for line in proc.stdout:
output = line.decode('utf-8')
if debug:
print(output, end='')
output_concat += output
for line in proc.stderr:
error = line.decode('utf-8')
print(error, end='')
error_concat += error
if proc.poll() is not None:
break
return output_concat, error_concat
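
if __name__ == '__main__':
    # Small runnable demo (added for illustration, not in the original module):
    # stream the output of a trivial command. The `echo` invocation is only an
    # example; any Popen with piped stdout/stderr works the same way.
    from subprocess import PIPE
    demo = Popen(['echo', 'hello'], stdout=PIPE, stderr=PIPE)
    out, err = stream_process(demo, debug=True)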
| 33.690909 | 107 | 0.671344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,086 | 0.586077 |
ba157f63ae5acff9ccfb66ad9328574fb2aa9997 | 1,639 | py | Python | tests/test_day12.py | dmies/adventOfCode | 55761aa91f5616d556653956d95b869f8e5a7bb5 | [
"Apache-2.0"
] | null | null | null | tests/test_day12.py | dmies/adventOfCode | 55761aa91f5616d556653956d95b869f8e5a7bb5 | [
"Apache-2.0"
] | null | null | null | tests/test_day12.py | dmies/adventOfCode | 55761aa91f5616d556653956d95b869f8e5a7bb5 | [
"Apache-2.0"
] | 1 | 2020-11-25T12:17:34.000Z | 2020-11-25T12:17:34.000Z | from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy
def test_init_handles_string_input():
line = "<x=15, y=-2, z=-6>"
expected_x = 15
expected_y = -2
expected_z = -6
result = Moon(line)
assert result.x == expected_x
assert result.y == expected_y
assert result.z == expected_z
def test_apply_velocity():
# x=1, y=2, z=3 and a velocity of x=-2, y=0,z=3, then its new position would be x=-1, y=2, z=6
moon = Moon("<x=1, y=2, z=3>")
moon.velocity = (-2, 0, 3)
expected = Moon("<x=-1, y=2, z=6>")
expected.velocity = (-2, 0, 3)
moon.apply_velocity()
assert moon == expected
def test_get_total_energy():
expected = 179
start_input = [
"<x=-1, y=0, z=2>",
"<x=2, y=-10, z=-7>",
"<x=4, y=-8, z=8>",
"<x=3, y=5, z=-1>",
]
moons = [Moon(row) for row in start_input]
result = get_total_energy(moons, 10)
assert result == expected
def test_get_total_energy_2nd_example():
expected = 1940
start_input = [
"<x=-8, y=-10, z=0>",
"<x=5, y=5, z=10>",
"<x=2, y=-7, z=3>",
"<x=9, y=-8, z=-3>",
]
moons = [Moon(row) for row in start_input]
result = get_total_energy(moons, 100)
assert result == expected
def test_get_steps_to_find_same_state():
expected = 4686774924
start_input = [
"<x=-8, y=-10, z=0>",
"<x=5, y=5, z=10>",
"<x=2, y=-7, z=3>",
"<x=9, y=-8, z=-3>",
]
moons = [Moon(row) for row in start_input]
result = get_steps_to_find_same_state(moons)
assert result == expected
| 24.833333 | 98 | 0.559487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.227578 |
ba16fa59961b765da18297dbd32815b00e0a3cbc | 3,628 | py | Python | SudaAutoLogin.py | a386881/- | 7ff1d2725d4e9691f4aabd46c928ed1a554375d2 | [
"Apache-2.0"
] | 1 | 2018-03-28T16:35:46.000Z | 2018-03-28T16:35:46.000Z | SudaAutoLogin.py | a386881/- | 7ff1d2725d4e9691f4aabd46c928ed1a554375d2 | [
"Apache-2.0"
] | null | null | null | SudaAutoLogin.py | a386881/- | 7ff1d2725d4e9691f4aabd46c928ed1a554375d2 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
VERSION = r'2016/04/02'
# Configuration
URL = "http://wg.suda.edu.cn/indexn.aspx"
STUDENT_ID = 'XXXXXXXXXXXXXXXXXXXX'
STUDENT_PASSWORD = 'XXXXXXXXXXXXXXXXXXXX'
'''
Created on 2016-04-02
@author: XenoAmess
'''
import urllib.parse
import urllib.request
import time
def get_html(url):
    '''Fetch the page at url and return its HTML as a string.'''
html_byte = urllib.request.urlopen(url).read()
html_str = str(html_byte, "utf-8")
return html_str
def txt_wrap_by(start_str, end_str, html_str):
    '''Return the substring of html_str enclosed between start_str and end_str.'''
start = html_str.find(start_str)
if start >= 0:
start += len(start_str)
end = html_str.find(end_str, start)
if end >= 0:
return html_str[start:end].strip()
def read_html_for_money(html_str):
    '''Parse the fetched HTML and return the account balance.'''
    reg__L = r"</font><br/><br/><font color='#000'>您的帐户余额是<font color='#ff0000'><b>"  # page text: "your account balance is"
    reg__R = r"</b></font>元。</font><br><br>"  # "元。" = "yuan."
money = txt_wrap_by(reg__L, reg__R, html_str)
return money
def read_html_for_keys(html_str):
    '''Parse the fetched HTML and return the two hidden ASP.NET form keys.'''
reg__VIEWSTATE = r'<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="'
reg__EVENTVALIDATION = r'<input type="hidden" name="__EVENTVALIDATION" id="__EVENTVALIDATION" value="'
reg__right = r'" />'
viewstate = txt_wrap_by(reg__VIEWSTATE, reg__right, html_str)
eventvalidation = txt_wrap_by(reg__EVENTVALIDATION, reg__right, html_str)
return viewstate, eventvalidation
# file_input = open('data.txt')
# html_str = file_input.read()
# file_input.close()
# read_html(html_str)
def reget_html(url, string_student_id, string_student_password, VIEWSTATE, EVENTVALIDATION):
    '''POST the login form back to the gateway and return the response HTML.'''
search = urllib.parse.urlencode([
('__EVENTTARGET', ''),
('__EVENTARGUMENT', ''),
('__VIEWSTATE', VIEWSTATE),
('__EVENTVALIDATION', EVENTVALIDATION),
('TextBox1', string_student_id),
('TextBox2', string_student_password),
('nw', 'RadioButton2'),
('tm', 'RadioButton8'),
        ('Button1', '登录网关'),  # literal form value the server expects; means "log in to the gateway"
])
search = bytes(search, encoding="utf8")
html_byte = urllib.request.urlopen(url, search).read()
html_str = str(html_byte, "utf-8")
return html_str
def output_to_file(file_name_str, file_str):
file_output = open(file_name_str, 'w')
file_output.write(file_str)
file_output.close()
def main():
html_str = get_html(URL)
output_to_file('before.html', html_str)
VIEWSTATE, EVENTVALIDATION = read_html_for_keys(html_str)
html_str = reget_html(URL, STUDENT_ID, STUDENT_PASSWORD, VIEWSTATE, EVENTVALIDATION)
output_to_file('after.html', html_str)
print(time.strftime("%c"))
money = read_html_for_money(html_str)
    if money is not None:
        print('Login succeeded. Your account balance is ' + money + ' yuan.')
    else:
        print('Please check whether you are already logged in. If there is still no network connection, this program has stopped working.')
def runforever():
    '''Log in again automatically every ten minutes.'''
    while True:
main()
time.sleep(10 * 60)
if __name__ == "__main__":
    print('Soochow University gateway auto-login')
    print()
    print('Version:')
    print(VERSION)
    print()
    print('User ID:')
    print(STUDENT_ID)
    print()
    print('User password:')
    print(STUDENT_PASSWORD)
    print()
    print('Gateway URL:')
    print(URL)
    print()
    print('Author:')
    print('XenoAmess')
    print()
    print('Notes:')
    print('The gateway seems to drop connections regularly, so this program logs in again every 10 minutes; just minimize it and leave it running. Whichever duration button you pick you are logged out after 2 hours anyway, but I still simulate clicking the 10-hour option. If you only need a single login, change runforever() on the next line to main().')
runforever()
| 29.495935 | 146 | 0.628997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,674 | 0.401054 |
ba170cf1e4780562bdfa615594947700438fb2a3 | 1,965 | py | Python | app/healthcheck/monitor.py | publichealthengland/coronavirus-dashboard-easy-read | 786409d79341b4ded3c0204e7b487423681e9c28 | [
"MIT"
] | 1 | 2022-02-21T14:23:08.000Z | 2022-02-21T14:23:08.000Z | app/healthcheck/monitor.py | publichealthengland/coronavirus-dashboard-easy-read | 786409d79341b4ded3c0204e7b487423681e9c28 | [
"MIT"
] | 25 | 2021-01-19T13:41:36.000Z | 2022-03-04T09:07:50.000Z | app/healthcheck/monitor.py | publichealthengland/coronavirus-dashboard-easy-read | 786409d79341b4ded3c0204e7b487423681e9c28 | [
"MIT"
] | 3 | 2021-04-14T11:30:22.000Z | 2022-01-17T20:23:50.000Z | #!/usr/bin/env python3
"""
<Description of the programme>
Author: Pouria Hadjibagheri <pouria.hadjibagheri@phe.gov.uk>
Created: 15 Mar 2021
License: MIT
Contributors: Pouria Hadjibagheri
"""
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
from typing import Union
from http import HTTPStatus
from asyncio import get_event_loop, wait
# 3rd party:
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
# Internal:
from app.database.postgres import Connection
from app.storage import AsyncStorageClient
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Header
__author__ = "Pouria Hadjibagheri"
__copyright__ = "Copyright (c) 2021, Public Health England"
__license__ = "MIT"
__version__ = "0.0.1"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'run_healthcheck'
]
async def test_db():
async with Connection() as conn:
db_active = await conn.fetchval("SELECT NOW() AS timestamp;")
return {"db": f"healthy - {db_active}"}
async def test_storage():
async with AsyncStorageClient("pipeline", "info/seen") as blob_client:
blob = await blob_client.download()
blob_data = await blob.readall()
return {"storage": f"healthy - {blob_data.decode()}"}
async def run_healthcheck(request: Request) -> Union[JSONResponse, Response]:
loop = get_event_loop()
tasks = [
# loop.create_task(test_db()), # Too frequent - needs to be revised.
loop.create_task(test_storage())
]
response = dict()
done, pending = await wait(tasks)
for future in done:
response.update(future.result())
if request.method == 'GET':
return JSONResponse(response, status_code=HTTPStatus.OK.real)
return Response(content=None, status_code=HTTPStatus.NO_CONTENT.real)
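
if __name__ == "__main__":
    # Illustrative wiring (not part of the original module): serve the handler
    # on a local Starlette app. The "/healthcheck" path and the uvicorn runner
    # are assumptions for this sketch, not taken from the project configuration.
    import uvicorn
    from starlette.applications import Starlette
    from starlette.routing import Route

    app = Starlette(routes=[Route("/healthcheck", run_healthcheck, methods=["GET", "HEAD"])])
    uvicorn.run(app, host="127.0.0.1", port=8000)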
| 27.291667 | 90 | 0.60458 | 0 | 0 | 0 | 0 | 0 | 0 | 965 | 0.491094 | 812 | 0.413232 |
ba174b62313fc6bd666f2be4dd49654e375a89a1 | 8,797 | py | Python | src/dev/gym-tensegrity/gym_tensegrity/envs/jumper_test.py | hany606/Tensegrity_Robotics | 60b34ded95fb641842d46add450e398149d7ec92 | [
"Apache-2.0"
] | null | null | null | src/dev/gym-tensegrity/gym_tensegrity/envs/jumper_test.py | hany606/Tensegrity_Robotics | 60b34ded95fb641842d46add450e398149d7ec92 | [
"Apache-2.0"
] | null | null | null | src/dev/gym-tensegrity/gym_tensegrity/envs/jumper_test.py | hany606/Tensegrity_Robotics | 60b34ded95fb641842d46add450e398149d7ec92 | [
"Apache-2.0"
] | null | null | null | import gym
import gym_tensegrity
import numpy as np
import os
from time import sleep
# Discrete action space functions testing
def main(port_num=10042):
def print_observation(obs):
print("Observations {:}".format(obs))
env = gym.make('gym_tensegrity:jumper-v0')
# action = randint(0,15)
action = 14
# print("Action: {:}".format(action))
init_obs ,_,_,_=env.step(action)
# print_observation(init_obs)
# print(env.env.actions_json)
# print("")
# input("-> check point: WAIT for INPUT !!!!")
# for i in range(50):
# input("-> check point: WAIT for INPUT !!!!")
# observation, reward, done, _= env.step(action)
# print_observation(observation)
# print("Done:???:{:}".format(done))
# input("-> check point: WAIT for INPUT !!!!")
# for i in range(1,1001):
# action = env.action_space.sample()
# # action = 2
# input("-> check point: WAIT for INPUT !!!!")
# print("--------------- ({:}) ---------------".format(i))
# print("######\nAction: {:}\n######".format(action))
# observation, reward, done, _= env.step(action)
# print_observation(observation)
# print("Done:???:{:}".format(done))
# input("-> check point: WAIT for INPUT !!!!")
# for i in range(50):
# observation, reward, done, _= env.step(2)
# input("-> check point: WAIT for INPUT !!!!")
flag = 0
# i = 0
while True:
# i += 1
# print(i)
# if(i > 100):
# i = 0
# env.reset()
inp = "d"
# inp = input("~~~~~~input: ")
if(inp == "w"):
flag = 1
elif(inp == "s"):
flag = -1
elif(inp == "d"):
flag = 0
if(flag <= 0):
observation, reward, done, _= env.step(4)
observation, reward, done, _= env.step(5)
if(flag >= 0):
observation, reward, done, _= env.step(12)
observation, reward, done, _= env.step(13)
print(observation)
print("angle:{:}".format(observation[-1]*180/np.pi))
def forked_process_main():
port_num_base = 10042
num_threads = 2
for i in range(num_threads):
pid = os.fork()
print("fork {:}".format(pid))
        if pid == 0:  # os.fork() returns 0 in the child; the child runs the env
            print("Child: {:} -> on port: {:}".format(os.getpid(), port_num_base+i))
            config = {"port_num": port_num_base+i}
            main(config)
def threaded_main():
import threading
port_num_base = 10042
num_threads = 10
threads_list = []
for i in range(num_threads):
config = {"port_num":port_num_base+i}
threads_list.append(threading.Thread(target=main, args=(config,)))
for i in range(num_threads):
threads_list[i].start()
# Continuous action space for lengths function testing
def main_cont_lengths(port_num=10042):
def print_observation(obs):
print("Observations {:}".format(obs))
env = gym.make('gym_tensegrity:jumper-v0')
# action = randint(0,15)
action = [7.95 for i in range(8)]
# action[0] = 5
print("Action: {:}".format(action))
# input("-> check point: WAIT for INPUT !!!!")
init_obs ,_,_,_=env.step(action)
print_observation(init_obs)
# print(env.env.actions_json)
# print("")
# input("-> check point: WAIT for INPUT !!!!")
flag = 0
# i = 0
while True:
observation, reward, done, _= env.step(init_obs[:-1])
print(observation)
print("angle:{:}".format(observation[-1]*180/np.pi))
# Continuous action space for delta lengths function testing
def main_cont_dlengths(config):
def print_observation(obs):
print("Observations {:}".format(obs))
tot_reward = 0
env = gym.make('gym_tensegrity:jumper-v0', config=config)
# action = randint(0,15)
action = np.array([0. for i in range(8)])
# action[0] = 1.7
print("Action: {:}".format(action))
# input("-> check point: WAIT for INPUT !!!!")
init_obs ,tot_reward,done,_=env.step(action)
print_observation(init_obs)
action[0] = 0
# print(env.env.actions_json)
# print("")
input("-> check point: WAIT for INPUT !!!!")
while not done:
action = env.action_space.sample()
observation, reward, done, _= env.step(action)
tot_reward += reward
print("Action: {:}".format(action))
# input("-> check point: WAIT for INPUT !!!!")
print("Reward: {:}, Done: {:}".format(reward,done))
print("Time: {:}".format(env.env.getTime()))
print_observation(observation)
print("angle:{:}".format(env.env.getLegAngle()*180/np.pi))
print("Total Reward: {:}".format(tot_reward))
# sleep(0.01)
input("-> check point: WAIT for INPUT !!!!")
    flag = 0  # make sure flag exists before the branch checks below run
    while True:
inp = "d"
inp = input("~~~~~~input: ")
#action = env.action_space.sample()
#observation, reward, done, _= env.step(action)
if(inp == "w"):
flag = 1
elif(inp == "s"):
flag = -1
elif(inp == "d"):
flag = 0
if(flag < 0):
action[0] = -0.1
observation, reward, done, _= env.step(action)
# # action[0] = 0
# # observation, reward, done, _= env.step(action)
if(flag > 0):
action[0] = 0.1
observation, reward, done, _= env.step(action)
# # action[0] = 0
# # observation, reward, done, _= env.step(action)
if(flag == 0):
action[0] = 0
observation, reward, done, _= env.step(action)
print(observation)
print("angle:{:}".format(env.env.getLegAngle()*180/np.pi))
def test(config=None):
def print_observation(obs):
# This printing for the default observation
print("Observations: ")
for i in range(6):
print("#{:} End point: {:}".format(i+1, [obs[3*i:3*(i+1)]]))
print("---")
for i in range(6):
print("#{:} End point velocity: {:}".format(i+1, [obs[3*(i+6):3*(i+1+6)]]))
print("Leg angle:{:}".format(env.env.getLegAngle()*180/np.pi))
squre_sides_angles = env.env.getSquareSidesAngles()
print("Square side angle1:{:}".format(squre_sides_angles[0]*180/np.pi))
print("Square side angle2:{:}".format(squre_sides_angles[1]*180/np.pi))
print("----------------------------------")
if(config is not None):
env = gym.make('gym_tensegrity:jumper-v0', config=config)
if(config is None):
env = gym.make('gym_tensegrity:jumper-v0')
observation = env.reset()
print_observation(observation)
tot_reward = 0
action = np.array([0. for i in range(8)])
done = False
input("-> check point: WAIT for INPUT !!!!")
while not done:
#inp = input("INPUT")
# action = env.action_space.sample()
print("Action: {:}".format(action))
observation, reward, done, _= env.step(action)
tot_reward += reward
print("Reward: {:}, Done: {:}".format(reward,done))
print("Time: {:}".format(env.env.getTime()))
print_observation(observation)
print("angle:{:}".format(env.env.getLegAngle()*180/np.pi))
print("Total Reward: {:}".format(tot_reward))
# input("-> check point: WAIT for INPUT !!!!")
# sleep(0.01)
input("-> check point: WAIT for INPUT !!!!")
flag = 0
while True:
inp = 'd'
# inp = input("~~~~~~input: ")
#action = env.action_space.sample()
#observation, reward, done, _= env.step(action)
if(inp == "w"):
flag = 1
elif(inp == "s"):
flag = -1
elif(inp == "d"):
flag = 0
if(flag < 0):
action[0] = -0.1
if(flag > 0):
action[0] = 0.1
if(flag == 0):
action[0] = 0
observation, reward, done, _= env.step(action)
print(action)
print_observation(observation)
if __name__ == "__main__":
# test({'starting_coordinates':(0,10,0), "max_num_steps":1000, "starting_angle":(1.0001*np.pi/180,0)})
# test({'starting_coordinates':(0,100,0), "max_num_steps":10000, "starting_angle":(0,0), "starting_leg_angle": (0,0), "randomized_starting": False})
# test({"max_num_steps":10000, "randomized_starting": {"angle":[False], "height":[True, 10,100]}})
# test({'starting_coordinates':[0,10,0], "max_num_steps":10000, "randomized_starting": {"angle":[[True, True], [4,1],[10,3]], "height":[False]}})
test({'starting_coordinates':[0,10,0], "max_num_steps":10000, 'starting_leg_angle':[1,2],
'observation_noise': {"uncorrelated":{"mean":0,"stdev":1}, "correlated":{"mean":0,"stdev":1}}})
| 33.32197 | 152 | 0.545982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,538 | 0.402183 |
ba1783f6512fef6b2a7094ff616513eb52813862 | 8,476 | py | Python | lane_identifier.py | samguns/Project4-Advanced-Lane-Finding | 65b39e434f167f5e4da4f2d71fd9b485cd895545 | [
"MIT"
] | null | null | null | lane_identifier.py | samguns/Project4-Advanced-Lane-Finding | 65b39e434f167f5e4da4f2d71fd9b485cd895545 | [
"MIT"
] | null | null | null | lane_identifier.py | samguns/Project4-Advanced-Lane-Finding | 65b39e434f167f5e4da4f2d71fd9b485cd895545 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import cv2
class LaneIdentifier:
def __init__(self, smooth_factor, filter):
self.left_lane_inds = []
self.right_lane_inds = []
self.lane_gap = []
self.binary_warped = None
self.window_height = None
self.leftx_current = 0
self.rightx_current = 0
self.nonzeroy = None
self.nonzerox = None
self.left_fit = None
self.right_fit = None
self.margin = 100
self.nwindows = 9
self.minpix = 50
self.leftx = []
self.lefty = []
self.rightx = []
self.righty = []
self.smooth_factor = smooth_factor
self.filter = filter
return
def identify_lanes(self, binary):
self.binary_warped = binary
self.window_height = np.int(self.binary_warped.shape[0] // self.nwindows)
nonzero = binary.nonzero()
self.nonzeroy = np.array(nonzero[0])
self.nonzerox = np.array(nonzero[1])
if self.left_fit is None or self.right_fit is None:
self.blind_sliding_window_search()
else:
self.selective_window_search()
ret = self.extract_lane_lines()
if ret is False:
return False, None, None
return True, self.left_fit, self.right_fit
def blind_sliding_window_search(self):
histogram = np.sum(self.binary_warped[self.binary_warped.shape[0] // 2:, :], axis=0)
midpoint = np.int(histogram.shape[0] // 2)
leftx_current = np.argmax(histogram[:midpoint])
rightx_current = np.argmax(histogram[midpoint:]) + midpoint
l_lane_inds = []
r_lane_inds = []
for window in range(self.nwindows):
win_y_low = self.binary_warped.shape[0] - (window + 1) * self.window_height
win_y_high = self.binary_warped.shape[0] - window * self.window_height
win_xleft_low = leftx_current - self.margin
win_xleft_high = leftx_current + self.margin
win_xright_low = rightx_current - self.margin
win_xright_high = rightx_current + self.margin
good_left_inds = ((self.nonzeroy >= win_y_low) &
(self.nonzeroy < win_y_high) &
(self.nonzerox >= win_xleft_low) &
(self.nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((self.nonzeroy >= win_y_low) &
(self.nonzeroy < win_y_high) &
(self.nonzerox >= win_xright_low) &
(self.nonzerox < win_xright_high)).nonzero()[0]
l_lane_inds.append(good_left_inds)
r_lane_inds.append(good_right_inds)
if len(good_left_inds) > self.minpix:
leftx_current = np.int(np.mean(self.nonzerox[good_left_inds]))
if len(good_right_inds) > self.minpix:
rightx_current = np.int(np.mean(self.nonzerox[good_right_inds]))
self.left_lane_inds = np.concatenate(l_lane_inds)
self.right_lane_inds = np.concatenate(r_lane_inds)
return
def selective_window_search(self):
self.left_lane_inds = ((self.nonzerox >
(self.left_fit[0]*(self.nonzeroy**2) + self.left_fit[1]*self.nonzeroy +
self.left_fit[2] - self.margin)) &
(self.nonzerox <
(self.left_fit[0] * (self.nonzeroy ** 2) + self.left_fit[1]*self.nonzeroy +
self.left_fit[2] + self.margin)))
self.right_lane_inds = ((self.nonzerox >
(self.right_fit[0] * (self.nonzeroy ** 2) + self.right_fit[1] * self.nonzeroy +
self.right_fit[2] - self.margin)) &
(self.nonzerox <
(self.right_fit[0] * (self.nonzeroy ** 2) + self.right_fit[1] * self.nonzeroy +
self.right_fit[2] + self.margin)))
return
def extract_lane_lines(self):
# Extract left and right line pixel positions
leftx = self.nonzerox[self.left_lane_inds]
lefty = self.nonzeroy[self.left_lane_inds]
rightx = self.nonzerox[self.right_lane_inds]
righty = self.nonzeroy[self.right_lane_inds]
if leftx.size == 0 or rightx.size == 0:
if self.left_fit is None or self.right_fit is None:
return False
# Outliers filter, delete those that far away from previous
# recognized lane curve.
if self.left_fit is not None:
leftx_trend = self.left_fit[0]*lefty*lefty + self.left_fit[1]*lefty + self.left_fit[2]
            residuals = abs(leftx - leftx_trend)  # avoid shadowing the builtin `range`
            indices = (residuals > self.filter).nonzero()
leftx = np.delete(leftx, indices)
lefty = np.delete(lefty, indices)
if self.right_fit is not None:
rightx_trend = self.right_fit[0]*righty*righty + self.right_fit[1]*righty + self.right_fit[2]
            residuals = abs(rightx - rightx_trend)  # avoid shadowing the builtin `range`
            indices = (residuals > self.filter).nonzero()
rightx = np.delete(rightx, indices)
righty = np.delete(righty, indices)
# Take previous identified pixels into 2nd order polynomial
# calculation, in order to alleviate oscillation.
self.leftx = np.append(self.leftx, leftx)
self.lefty = np.append(self.lefty, lefty)
self.rightx = np.append(self.rightx, rightx)
self.righty = np.append(self.righty, righty)
self.leftx = self.leftx[-self.smooth_factor:]
self.lefty = self.lefty[-self.smooth_factor:]
self.rightx = self.rightx[-self.smooth_factor:]
self.righty = self.righty[-self.smooth_factor:]
# Fit a second order polynomial to each
self.left_fit = np.polyfit(self.lefty, self.leftx, 2)
self.right_fit = np.polyfit(self.righty, self.rightx, 2)
return True
def visualization(self):
# Generate x and y values for plotting
ploty = np.linspace(0, self.binary_warped.shape[0] - 1, self.binary_warped.shape[0])
left_fitx = self.left_fit[0] * ploty ** 2 + self.left_fit[1] * ploty + self.left_fit[2]
right_fitx = self.right_fit[0] * ploty ** 2 + self.right_fit[1] * ploty + self.right_fit[2]
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((self.binary_warped, self.binary_warped, self.binary_warped)) * 255
fit_img = np.zeros_like(out_img)
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[self.nonzeroy[self.left_lane_inds], self.nonzerox[self.left_lane_inds]] = [255, 0, 0]
out_img[self.nonzeroy[self.right_lane_inds], self.nonzerox[self.right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - self.margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + self.margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - self.margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + self.margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
result = cv2.addWeighted(fit_img, 1, window_img, 0.3, 0)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(out_img)
ax1.set_title('Detected Lane Points', fontsize=30)
ax2.imshow(result)
ax2.set_title('Lane Lines', fontsize=30)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0) | 44.376963 | 111 | 0.590373 | 8,413 | 0.992567 | 0 | 0 | 0 | 0 | 0 | 0 | 639 | 0.075389 |
ba1793e21f50164606de991114f96e7cddb49ec8 | 1,331 | py | Python | test_tree.py | lightmanca/InterviewPrep | d77d6b8a1d9dd4f4d5b2d5ef38a5d1c9b2e50f07 | [
"Apache-2.0"
] | null | null | null | test_tree.py | lightmanca/InterviewPrep | d77d6b8a1d9dd4f4d5b2d5ef38a5d1c9b2e50f07 | [
"Apache-2.0"
] | null | null | null | test_tree.py | lightmanca/InterviewPrep | d77d6b8a1d9dd4f4d5b2d5ef38a5d1c9b2e50f07 | [
"Apache-2.0"
] | null | null | null | import pytest
from Tree import Tree
class TestClass:
def setup_class(self):
pass
def test_tree(self):
tree = Tree()
tree.add(3)
tree.add(4)
tree.add(0)
tree.add(8)
tree.add(2)
tree.print_tree()
assert True
def test_compare_trees(self):
tree1 = Tree()
tree2 = Tree()
assert tree1.compare_tree(tree2) is True
tree1.add(3)
assert tree1.compare_tree(tree2) is False
tree1.add(4)
tree1.add(0)
tree1.add(8)
tree1.add(2)
tree2.add(3)
tree2.add(4)
tree2.add(0)
tree2.add(8)
tree2.add(2)
assert tree1.compare_tree(tree2) is True
tree2.add(10)
assert tree1.compare_tree(tree2) is False
tree1.add(10)
assert tree1.compare_tree(tree2) is True
tree1.add(-1)
assert tree1.compare_tree(tree2) is False
tree2.add(-1)
assert tree1.compare_tree(tree2) is True
tree1.add(-5)
tree2.add(-3)
assert tree1.compare_tree(tree2) is False
tree2.add(-5)
tree1.add(-3)
tree1.print_tree()
# tree2.rebalance_tree()
#tree1.rebalance_tree()
#tree1.print_tree()
# assert tree1.compare_tree(tree2) is True
| 21.819672 | 50 | 0.555222 | 1,291 | 0.969947 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.081142 |
ba184328b10f564a0cd03008486c8ac39964d140 | 158 | py | Python | 03_Day_Operators/10.py | diegofregolente/30-Days-Of-Python | e0cad31f6d5ab1384ad6fa5a5d24a84771d6c267 | [
"Apache-2.0"
] | null | null | null | 03_Day_Operators/10.py | diegofregolente/30-Days-Of-Python | e0cad31f6d5ab1384ad6fa5a5d24a84771d6c267 | [
"Apache-2.0"
] | null | null | null | 03_Day_Operators/10.py | diegofregolente/30-Days-Of-Python | e0cad31f6d5ab1384ad6fa5a5d24a84771d6c267 | [
"Apache-2.0"
] | null | null | null | x1 = 1
y1 = 0
x2 = 0
y2 = -2
m1 = (y2 - y1) / (x2 - x1)  # slope = rise over run
x1 = 2
y1 = 2
x2 = 6
y2 = 10
m2 = (y2 - y1) / (x2 - x1)  # parentheses matter: subtract before dividing
diff = m1 - m2
print(f'Diff of 8 and 9: {diff:.2f}') # 10 | 11.285714 | 43 | 0.468354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.21519 |
ba19039d6d153acd59e2ad4f03240cced0340680 | 4,780 | py | Python | zsh/gitstatus.py | liuyang1/dotfiles | d43c051c2daff0f21cc1921f267786dcafcc7cca | [
"MIT"
] | 1 | 2021-01-15T03:41:46.000Z | 2021-01-15T03:41:46.000Z | zsh/gitstatus.py | liuyang1/dotfiles | d43c051c2daff0f21cc1921f267786dcafcc7cca | [
"MIT"
] | null | null | null | zsh/gitstatus.py | liuyang1/dotfiles | d43c051c2daff0f21cc1921f267786dcafcc7cca | [
"MIT"
] | 1 | 2019-05-27T11:39:08.000Z | 2019-05-27T11:39:08.000Z | #!/usr/bin/env python3
# -*- encoding=utf-8 -*-
from __future__ import print_function
from subprocess import Popen, PIPE
import os
import serverlog
log = serverlog.inst()
def getGitStat(d):
gitstat = Popen(
['git', 'status', '--short', '--branch'],
stdout=PIPE, stderr=PIPE, cwd=d)
stat, error = gitstat.communicate()
error_string = error.decode('utf-8')
if 'fatal: Not a git repository' in error_string:
log.warn("not a git repo [%s]" % (d))
raise Exception("not a git repo")
return stat
def getGitHead(d):
githead = Popen(
['git', 'rev-parse', '--short', 'HEAD'],
stdout=PIPE, stderr=PIPE, cwd=d)
commit = githead.communicate()
ret = commit[0].strip()
return ret
def probeBranch(s, d):
"""
>>> probeBranch('## 7094...c [ahead 1]')
('7094', 'c', 1, 0)
>>> probeBranch('## HEAD (no branch)')
('HEAD', '', 0, 0)
>>> probeBranch('## 7094...c [ahead 1, behind 1]')
('7094', 'c', 1, 1)
"""
br, rmt, head, behind = "", "", 0, 0
s = s[3:]
initialHead = "Initial commit on "
if s.startswith(initialHead):
br = "Init"
return br, rmt, head, behind
arr = s.split("...")
br = arr[0]
# remove () content
idx = br.find(" (")
if idx != -1:
br = br[0:idx]
if br == 'HEAD':
br = getGitHead(d)
try:
rmt = arr[1]
except IndexError:
return br, rmt, head, behind
rmt = rmt.split()[0]
def getNumber(s, prefix, suffix):
head = s.find(prefix)
        if head == -1:
return 0
head += len(prefix)
tail = s.find(suffix, head)
        if tail == -1:
return 0
sub = s[head:tail]
try:
return int(sub)
except ValueError:
return 0
head = getNumber(s, "[ahead ", "]")
if head == 0:
head = getNumber(s, "[ahead ", ",")
behind = getNumber(s, "behind ", "]")
return br, rmt, head, behind
def probeLines(lines):
Untrack, uModify, uDelete = 0, 0, 0
Modify, Add, Delete = 0, 0, 0
for line in lines:
if line is "":
continue
X, Y = line[0], line[1]
if X is '!':
continue
Untrack += X is '?'
if X is " " or X is "M":
uModify += Y is 'M'
uDelete += Y is 'D'
Modify += X is 'M'
Add += X is 'A'
Delete += X is 'D'
return (Modify, Add, Delete), (uModify, uDelete, Untrack)
class RepoSt:
clean, unsync, stage, dirty = range(4)
def NotZero(v, sym=""):
    return sym + str(v) if v != 0 else ""
class Symb():
branch = ""
remove = "∆"
remote = "⌃"
# forward = "⌃"
# backward = "⌄"
# "⌃⌄"
# '><'
# "∧∨"
forward = '>'
backward = "<"
modify = "±"
add = "✚"
delt = "✖"
uModify = modify
uDelete = "x"
# Untrack = "✚"
# Untrack = "."
Untrack = "?"
delimiter = "•"
def fmtBranch(brRet):
br, rmt, head, behind = brRet
s = Symb.branch + br
log.debug("branch %s %s %s %s" % (br, rmt, head, behind))
if rmt != "origin/master" and rmt is not "":
idx = rmt.rfind("/")
if idx != -1:
rmt = rmt[idx + 1:]
if rmt != br:
s += Symb.remote + rmt
s += NotZero(head, Symb.forward)
s += NotZero(behind, Symb.backward)
status = RepoSt.unsync if head != 0 or behind != 0 else RepoSt.clean
return s, status
def fmtStage(num):
s = ""
Modify, Add, Delete = num
symbol = [Symb.modify, Symb.add, Symb.delt]
    status = RepoSt.stage if sum(num) != 0 else RepoSt.clean
for v, sym in zip(num, symbol):
s += NotZero(v, sym)
return s, status
def fmtDirty(num):
s = ""
uModify, uDelete, Untrack = num
symbol = [Symb.uModify, Symb.uDelete, Symb.Untrack]
# get status skip untrack
    status = RepoSt.dirty if sum(num[0:2]) != 0 else RepoSt.clean
for v, sym in zip(num, symbol):
s += NotZero(v, sym)
return s, status
def combSeg(br, stage, dirty):
arr = (fmtBranch(br), fmtStage(stage), fmtDirty(dirty))
seg, st = zip(*arr)
def NotNull(v, sym=""):
return sym + str(v) if v != "" else ""
s = seg[0]
if seg[1] != "" or seg[2] != "":
s += Symb.delimiter + seg[1]
s += Symb.delimiter + seg[2]
return s, max(st)
def main(d):
try:
stat = getGitStat(d)
    except Exception:  # getGitStat raises when d is not a git repository
log.warn("getGitStat on d=[%s] fail" % (d))
return ""
lines = stat.split('\n')
br = probeBranch(lines[0], d)
stage, dirty = probeLines(lines[1:])
ret = combSeg(br, stage, dirty)
return ret[0] + " " + str(ret[1])
if __name__ == "__main__":
ret = main(os.getcwd())
print(ret)
| 24.020101 | 72 | 0.5159 | 409 | 0.085084 | 0 | 0 | 0 | 0 | 0 | 0 | 805 | 0.167464 |
ba193713f4e6daa42b6733b362bbc46c29b53357 | 10,391 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/utm_utils.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/utm_utils.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/utm_utils.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
class UTMModuleConfigurationError(Exception):
def __init__(self, msg, **args):
super(UTMModuleConfigurationError, self).__init__(self, msg)
self.msg = msg
self.module_fail_args = args
def do_fail(self, module):
module.fail_json(msg=self.msg, other=self.module_fail_args)
class UTMModule(AnsibleModule):
"""
This is a helper class to construct any UTM Module. This will automatically add the utm host, port, token,
protocol, validate_certs and state field to the module. If you want to implement your own sophos utm module
just initialize this UTMModule class and define the Payload fields that are needed for your module.
See the other modules like utm_aaa_group for example.
"""
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
supports_check_mode=False, required_if=None):
default_specs = dict(
headers=dict(type='dict', required=False, default={}),
utm_host=dict(type='str', required=True),
utm_port=dict(type='int', default=4444),
utm_token=dict(type='str', required=True, no_log=True),
utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
validate_certs=dict(type='bool', required=False, default=True),
state=dict(default='present', choices=['present', 'absent'])
)
super(UTMModule, self).__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log,
mutually_exclusive, required_together, required_one_of,
add_file_common_args, supports_check_mode, required_if)
def _merge_specs(self, default_specs, custom_specs):
result = default_specs.copy()
result.update(custom_specs)
return result
class UTM:
def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
"""
Initialize UTM Class
:param module: The Ansible module
:param endpoint: The corresponding endpoint to the module
:param change_relevant_keys: The keys of the object to check for changes
:param info_only: When implementing an info module, set this to true. Will allow access to the info method only
"""
self.info_only = info_only
self.module = module
self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native(
module.params.get('utm_port')) + "/api/objects/" + endpoint + "/"
"""
The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
"""
self.change_relevant_keys = change_relevant_keys
self.module.params['url_username'] = 'token'
self.module.params['url_password'] = module.params.get('utm_token')
if all(elem in self.change_relevant_keys for elem in module.params.keys()):
raise UTMModuleConfigurationError(
"The keys " + to_native(
self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native(
module.params.keys()))
def execute(self):
try:
if not self.info_only:
if self.module.params.get('state') == 'present':
self._add()
elif self.module.params.get('state') == 'absent':
self._remove()
else:
self._info()
except Exception as e:
self.module.fail_json(msg=to_native(e))
def _info(self):
"""
returns the info for an object in utm
"""
info, result = self._lookup_entry(self.module, self.request_url)
if info["status"] >= 400:
self.module.fail_json(result=json.loads(info))
else:
if result is None:
self.module.exit_json(changed=False)
else:
self.module.exit_json(result=result, changed=False)
def _add(self):
"""
adds or updates a host object on utm
"""
combined_headers = self._combine_headers()
is_changed = False
info, result = self._lookup_entry(self.module, self.request_url)
if info["status"] >= 400:
self.module.fail_json(result=json.loads(info))
else:
data_as_json_string = self.module.jsonify(self.module.params)
if result is None:
response, info = fetch_url(self.module, self.request_url, method="POST",
headers=combined_headers,
data=data_as_json_string)
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
is_changed = True
result = self._clean_result(json.loads(response.read()))
else:
if self._is_object_changed(self.change_relevant_keys, self.module, result):
response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
headers=combined_headers,
data=data_as_json_string)
if info['status'] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
is_changed = True
result = self._clean_result(json.loads(response.read()))
self.module.exit_json(result=result, changed=is_changed)
def _combine_headers(self):
"""
This will combine a header default with headers that come from the module declaration
:return: A combined headers dict
"""
default_headers = {"Accept": "application/json", "Content-type": "application/json"}
if self.module.params.get('headers') is not None:
result = default_headers.copy()
result.update(self.module.params.get('headers'))
else:
result = default_headers
return result
def _remove(self):
"""
removes an object from utm
"""
is_changed = False
info, result = self._lookup_entry(self.module, self.request_url)
if result is not None:
response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
data=self.module.jsonify(self.module.params))
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
else:
is_changed = True
self.module.exit_json(changed=is_changed)
def _lookup_entry(self, module, request_url):
"""
Lookup for existing entry
:param module:
:param request_url:
:return:
"""
response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"})
result = None
if response is not None:
results = json.loads(response.read())
result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None)
return info, result
def _clean_result(self, result):
"""
Will clean the result from irrelevant fields
:param result: The result from the query
:return: The modified result
"""
del result['utm_host']
del result['utm_port']
del result['utm_token']
del result['utm_protocol']
del result['validate_certs']
del result['url_username']
del result['url_password']
del result['state']
return result
def _is_object_changed(self, keys, module, result):
"""
Check if my object is changed
:param keys: The keys that will determine if an object is changed
:param module: The module
:param result: The result from the query
:return:
"""
for key in keys:
if module.params.get(key) != result[key]:
return True
return False
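
# --- Illustrative sketch (not from the original file) -------------------------
# The UTMModule docstring above says concrete modules just declare their payload
# fields and hand them to UTM. A minimal module main() in that style might look
# like the function below; the "network/host" endpoint and the payload fields
# are placeholders mirroring modules such as utm_aaa_group, and the function is
# never invoked here.
def _example_main():
    argument_spec = dict(
        name=dict(type='str', required=True),
        comment=dict(type='str', required=False, default=""),
    )
    module = UTMModule(argument_spec=argument_spec)
    # change_relevant_keys lists the payload fields whose changes trigger a PUT.
    UTM(module, "network/host", ["comment"]).execute()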
| 44.217021 | 119 | 0.626311 | 8,435 | 0.81176 | 0 | 0 | 0 | 0 | 0 | 0 | 3,983 | 0.383312 |
ba19b202492740345b68fb765ce97ad817af8b04 | 560 | py | Python | application/migrations/0002_auto_20200901_2057.py | KMaina/counselling-app | 16991dd0ede0e60e6f9886de283178cedd4ab58a | [
"MIT"
] | null | null | null | application/migrations/0002_auto_20200901_2057.py | KMaina/counselling-app | 16991dd0ede0e60e6f9886de283178cedd4ab58a | [
"MIT"
] | null | null | null | application/migrations/0002_auto_20200901_2057.py | KMaina/counselling-app | 16991dd0ede0e60e6f9886de283178cedd4ab58a | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-09-01 17:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='client',
name='link',
field=models.URLField(blank=True, null=True),
),
migrations.AlterField(
model_name='client',
name='time',
field=models.DateTimeField(blank=True, max_length=50, null=True),
),
]
| 23.333333 | 77 | 0.575 | 469 | 0.8375 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.178571 |
ba1a7018c58b0366cadcc5857caa5cc11ed443e2 | 4,838 | py | Python | disks/bootstrap/root/lib.py | pnhowe/disks | 7e5170119704c5d3cdf0be9ff7dbdf7d0b052114 | [
"Apache-2.0"
] | null | null | null | disks/bootstrap/root/lib.py | pnhowe/disks | 7e5170119704c5d3cdf0be9ff7dbdf7d0b052114 | [
"Apache-2.0"
] | null | null | null | disks/bootstrap/root/lib.py | pnhowe/disks | 7e5170119704c5d3cdf0be9ff7dbdf7d0b052114 | [
"Apache-2.0"
] | null | null | null | import sys
import time
import subprocess
import re
_setMessage = None  # expected to be bound to a status-reporting callable (e.g. Bootstrap.setMessage) before the helpers below run
class Bootstrap:
def __init__( self, identifier, contractor ):
self.identifier = identifier
self.request = contractor.request
self.request( 'call', '/api/v1/Survey/Cartographer(register)', { 'identifier': identifier } )
def lookup( self, info_map ):
return self.request( 'call', '/api/v1/Survey/Cartographer:{0}:(lookup)'.format( self.identifier ), { 'info_map': info_map } )
def setMessage( self, message ):
self.request( 'call', '/api/v1/Survey/Cartographer:{0}:(setMessage)'.format( self.identifier ), { 'message': message } )
def done( self ):
return self.request( 'call', '/api/v1/Survey/Cartographer:{0}:(done)'.format( self.identifier ), {} )
def setIdMap( self, foundation_locator, id_map ):
return self.request( 'call', '/api/v1/Building/Foundation:{0}:(setIdMap)'.format( foundation_locator ), { 'id_map': id_map } )
def setPXEBoot( self, foundation_locator, pxe ):
iface_list, info = self.request( 'list', '/api/v1/Utilities/RealNetworkInterface', { 'foundation': '/api/v1/Building/Foundation:{0}:'.format( foundation_locator ) }, filter='foundation' )
if info[ 'total' ] != info[ 'count' ]:
      raise Exception( 'There are more interfaces than we were given' )  # wow, what kind of machine do you have there?
for iface in iface_list:
self.request( 'update', iface, { 'pxe': '/api/v1/BluePrint/PXE:{0}:'.format( pxe ) } )
def ipmicommand( cmd, ignore_failure=False ):
proc = subprocess.run( [ '/bin/ipmitool' ] + cmd.split() )
if proc.returncode != 0:
if ignore_failure:
print( 'WARNING: ipmi cmd "{0}" failed, ignored...'.format( cmd ) )
else:
_setMessage( 'Ipmi Error with: "{0}"'.format( cmd ) )
sys.exit( 1 )
def getLLDP():
counter = 0
lldp_values = {}
results = {}
while True:
proc = subprocess.run( [ '/sbin/lldpcli', 'show', 'neighbors', '-f', 'keyvalue' ], shell=False, stdout=subprocess.PIPE )
lldp_data = str( proc.stdout, 'utf-8' ).strip()
if len( lldp_data ) > 10:
for line in lldp_data.splitlines():
if '=' not in line:
continue
( key, value ) = line.split( '=' )
lldp_values[key] = value
break
else:
if counter >= 10:
_setMessage( 'lldp timeout waiting for data, skipping...' )
return results
counter += 1
time.sleep( 10 )
for item in lldp_values:
( protocol, interface, name ) = item.split( '.', 2 ) # protocol, interface
if interface not in results:
results[ interface ] = {}
if name == 'chassis.mac':
results[ interface ][ 'mac' ] = lldp_values[ item ]
elif name == 'chassis.name':
results[ interface ][ 'name' ] = lldp_values[ item ]
elif name in ( 'port.local', 'port.ifname' ):
parts = re.sub( '[^0-9/]', '', lldp_values[ item ] ).split( '/' )
if len( parts ) == 1:
results[ interface ][ 'slot' ] = 1
results[ interface ][ 'port' ] = int( parts[0] )
results[ interface ][ 'subport' ] = 0
elif len( parts ) == 2:
results[ interface ][ 'slot' ] = int( parts[0] )
results[ interface ][ 'port' ] = int( parts[1] )
results[ interface ][ 'subport' ] = 0
elif len( parts ) == 3:
results[ interface ][ 'slot' ] = int( parts[0] )
results[ interface ][ 'port' ] = int( parts[1] )
results[ interface ][ 'subport' ] = int( parts[2] )
else:
_setMessage( 'I don\'t know how to handle this lldp local port "{0}"'.format( lldp_values[ item ] ) )
sys.exit( 1 )
return results
def cpuPhysicalCount():
wrk = []
  with open( '/proc/cpuinfo', 'r' ) as cpuinfo:  # close the handle when done
    for line in cpuinfo.readlines():
      if line.startswith( 'physical id' ) and line not in wrk:
        wrk.append( line )
return len( wrk )
def cpuLogicalCount():
wrk = []
  with open( '/proc/cpuinfo', 'r' ) as cpuinfo:
    for line in cpuinfo.readlines():
      if line.startswith( 'processor' ) and line not in wrk:
        wrk.append( line )
return len( wrk )
def getRAMAmmount():
  with open( '/proc/meminfo', 'r' ) as meminfo:
    for line in meminfo.readlines():
      if line.startswith( 'MemTotal' ):
        return int( line.split( ':' )[1].strip().split( ' ' )[0] ) / 1024
def getIPMIMAC( lan_channel ):
proc = subprocess.run( [ '/bin/ipmitool', 'lan', 'print', str( lan_channel ) ], stdout=subprocess.PIPE )
lines = str( proc.stdout, 'utf-8' ).strip().splitlines()
for line in lines:
if line.startswith( 'MAC Address' ):
return line[ 25: ].strip()
return None
def getIpAddress( interface ):
proc = subprocess.run( [ '/sbin/ip', 'addr', 'show', 'dev', interface ], shell=False, stdout=subprocess.PIPE )
lines = str( proc.stdout, 'utf-8' ).strip().splitlines()
return lines[2].split()[1].split( '/' )[0]
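
if __name__ == '__main__':
  # Quick local smoke check (added for illustration, not in the original file).
  # Only the pure /proc helpers are exercised here, so it is Linux-only;
  # Bootstrap itself needs a live Contractor connection plus ipmitool/lldpcli.
  print( 'cpus: {0} physical / {1} logical'.format( cpuPhysicalCount(), cpuLogicalCount() ) )
  print( 'ram: {0} MiB'.format( getRAMAmmount() ) )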
| 32.689189 | 191 | 0.606656 | 1,391 | 0.287516 | 0 | 0 | 0 | 0 | 0 | 0 | 1,107 | 0.228814 |
ba1c71602bcc3a8f20aebbbae94098c99780e8ff | 152 | py | Python | src/backup_utils/databases/__init__.py | Oprax/backup-utils | 8de928d5257c9a67c65ca906e49596abe1e3b1ba | [
"MIT"
] | null | null | null | src/backup_utils/databases/__init__.py | Oprax/backup-utils | 8de928d5257c9a67c65ca906e49596abe1e3b1ba | [
"MIT"
] | null | null | null | src/backup_utils/databases/__init__.py | Oprax/backup-utils | 8de928d5257c9a67c65ca906e49596abe1e3b1ba | [
"MIT"
] | null | null | null | from functools import partial
from ..utils import load
__all__ = ["databases"]
databases = partial(load, pkg="backup_utils.databases", suffix="Db")
| 16.888889 | 68 | 0.743421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.256579 |
ba1cf387e7aec7401bb6e9dfcd3c708295ef07ce | 375 | py | Python | code_examples/tensorflow/basic_nmt_example/seq2seq_edits/attention_wrapper.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | code_examples/tensorflow/basic_nmt_example/seq2seq_edits/attention_wrapper.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | code_examples/tensorflow/basic_nmt_example/seq2seq_edits/attention_wrapper.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | # Copyright 2019 Graphcore Ltd.
'''
Edits to seq2seq AttentionWrapper
'''
import tensorflow as tf
class AttentionWrapperNoAssert(tf.contrib.seq2seq.AttentionWrapper):
    # Skips adding the Assert operations that "assert_equal" the wrapper's batch_size against each attention mechanism's batch_size
def _batch_size_checks(self, batch_size, error_message):
return []
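
if __name__ == "__main__":
    # Construction-only sketch (added for illustration; TF 1.x contrib API).
    # Shapes and unit counts are arbitrary; the point is that the class is a
    # drop-in replacement for tf.contrib.seq2seq.AttentionWrapper, minus the
    # batch-size Assert ops that some accelerators cannot execute.
    cell = tf.nn.rnn_cell.LSTMCell(64)
    memory = tf.zeros([8, 10, 64])
    attention = tf.contrib.seq2seq.LuongAttention(num_units=64, memory=memory)
    wrapped = AttentionWrapperNoAssert(cell, attention)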
| 31.25 | 126 | 0.784 | 274 | 0.730667 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.517333 |
ba1d7411408e4eae7740b57ca1aac506de98b421 | 45,178 | py | Python | deeppavlov/agents/coreference/models.py | deepmipt/kpi2017 | 0f6b13c6ea76e544804ce66ba372c66d5ef9ee30 | [
"Apache-2.0"
] | 3 | 2018-02-19T15:34:44.000Z | 2018-06-05T10:02:00.000Z | deeppavlov/agents/coreference/models.py | deepmipt/kpi2017 | 0f6b13c6ea76e544804ce66ba372c66d5ef9ee30 | [
"Apache-2.0"
] | null | null | null | deeppavlov/agents/coreference/models.py | deepmipt/kpi2017 | 0f6b13c6ea76e544804ce66ba372c66d5ef9ee30 | [
"Apache-2.0"
] | 1 | 2021-03-22T09:06:52.000Z | 2021-03-22T09:06:52.000Z | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import os
import copy
import numpy as np
import tensorflow as tf
from . import utils
from os.path import isdir, join
tf.NotDifferentiable("Spans")
tf.NotDifferentiable("Antecedents")
tf.NotDifferentiable("ExtractMentions")
tf.NotDifferentiable("DistanceBins")
seed = 5
tf.set_random_seed(seed)
class CorefModel(object):
"""
End-to-end neural model for coreference resolution.
    Class that creates the model from https://homes.cs.washington.edu/~kentonl/pub/lhlz-emnlp.2017.pdf
"""
def __init__(self, opt):
"""Initialize the class and model according to the given parameters in opt."""
self.opt = copy.deepcopy(opt)
tf.set_random_seed(opt['random_seed'])
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
coref_op_library = tf.load_op_library(join(opt['model_file'], "coref_kernels.so"))
self.spans = coref_op_library.spans
self.distance_bins = coref_op_library.distance_bins
self.extract_mentions = coref_op_library.extract_mentions
self.get_antecedents = coref_op_library.antecedents
dpath = join(self.opt['model_file'], self.opt['language'], 'agent')
self.log_root = join(dpath, 'logs')
self.char_embedding_size = self.opt["char_embedding_size"]
self.char_vocab_path = join(dpath, 'vocab', 'char_vocab.russian.txt')
self.char_dict = utils.load_char_dict(self.char_vocab_path)
if opt['emb_format'] == 'vec':
self.embedding_path = join(dpath, 'embeddings', 'embeddings_lenta_100.vec')
elif opt['emb_format'] == 'bin':
self.embedding_path = join(dpath, 'embeddings', 'ft_0.8.3_nltk_yalen_sg_300.bin')
else:
raise ValueError('Not supported embeddings format {}'.format(opt['emb_format']))
self.embedding_info = (self.opt["embedding_size"], self.opt["emb_lowercase"])
self.embedding_size = self.opt['embedding_size']
self.embedding_dicts = utils.load_embedding_dict(self.embedding_path, self.embedding_size,
self.opt["emb_format"])
self.max_mention_width = self.opt["max_mention_width"]
self.genres = {g: i for i, g in enumerate(self.opt["genres"])}
input_props = list()
input_props.append((tf.float64, [None, None, self.embedding_size])) # Text embeddings.
input_props.append((tf.int32, [None, None, None])) # Character indices.
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # Gold starts.
input_props.append((tf.int32, [None])) # Gold ends.
input_props.append((tf.int32, [None])) # Cluster ids.
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=1, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
# train type trigger
if self.opt['train_on_gold']:
self.predictions, self.loss = self.get_predictions_and_loss_on_gold(*self.input_tensors)
else:
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.opt["learning_rate"], self.global_step,
self.opt["decay_frequency"], self.opt["decay_rate"],
staircase=True)
learning_rate = tf.cond(learning_rate < opt['final_rate'],
lambda: tf.Variable(opt['final_rate'], tf.float32),
lambda: learning_rate)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
# gradients = [g if g is None else tf.cast(g, tf.float64) for g in gradients]
# gradients, _ = tf.clip_by_global_norm(gradients, self.opt["max_gradient_norm"])
optimizers = {
"adam": tf.train.AdamOptimizer,
"sgd": tf.train.GradientDescentOptimizer
}
optimizer = optimizers[self.opt["optimizer"]](learning_rate)
self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)
self.sess = tf.Session(config=config)
self.init_op = tf.global_variables_initializer()
self.sess.run(self.init_op)
def start_enqueue_thread(self, train_example, is_training, returning=False):
"""
        Initializes the queue of tensors that are fed one at a time to the model's input.
Args:
train_example: modified dict from agent
is_training: training flag
returning: returning flag
Returns:
if returning is True, return list of variables:
[word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids]
"""
tensorized_example = self.tensorize_example(train_example, is_training=is_training)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
self.sess.run(self.enqueue_op, feed_dict=feed_dict)
if returning:
return tensorized_example
def tensorize_mentions(self, mentions):
"""
Create two np.array of starts end ends positions of gold mentions.
Args:
mentions: list of tuple
Returns:
np.array(starts positions), np.array(ends positions)
"""
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
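    # Worked example (illustrative): tensorize_mentions([(0, 2), (5, 5)])
    # returns (np.array([0, 5]), np.array([2, 5])).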
def tensorize_example(self, example, is_training):
"""
Takes a dictionary from the observation and transforms it into a set of tensors
for tensorflow placeholders.
Args:
example: dict from observation
is_training: True or False value, use as a returned parameter or flag
Returns: word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids;
        these are numpy tensors for the placeholders (is_training is a bool).
        If the number of sentences in the document exceeds the "max_training_sentences" parameter,
        the method returns the result of the 'truncate_example' function instead.
"""
clusters = example["clusters"]
gold_mentions = sorted(tuple(m) for m in utils.flatten(clusters))
gold_mention_map = {m: i for i, m in enumerate(gold_mentions)}
cluster_ids = np.zeros(len(gold_mentions))
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id
sentences = example["sentences"]
num_words = sum(len(s) for s in sentences)
speakers = utils.flatten(example["speakers"])
assert num_words == len(speakers)
max_sentence_length = max(len(s) for s in sentences)
max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.opt["filter_widths"]))
word_emb = np.zeros([len(sentences), max_sentence_length, self.embedding_size])
char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
text_len = np.array([len(s) for s in sentences])
for i, sentence in enumerate(sentences):
for j, word in enumerate(sentence):
current_dim = 0
d = self.embedding_dicts
(s, l) = self.embedding_info
current_word = word
if l:
                    current_word = word.lower()
if self.opt['emb_format'] == 'vec':
word_emb[i, j, current_dim:current_dim + s] = utils.normalize(d[current_word])
else:
word_emb[i, j, current_dim:current_dim + s] = utils.normalize(np.array(d[current_word]))
current_dim += s
char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
speaker_dict = {s: i for i, s in enumerate(set(speakers))}
speaker_ids = np.array([speaker_dict[s] for s in speakers]) # numpy
doc_key = example["doc_key"]
genre = self.genres[doc_key[:2]] # int 1
gold_starts, gold_ends = self.tensorize_mentions(gold_mentions) # numpy of unicode str
if is_training and len(sentences) > self.opt["max_training_sentences"]:
return self.truncate_example(word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts,
gold_ends, cluster_ids)
else:
return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
def truncate_example(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends,
cluster_ids):
"""
It takes the output of the function "tensorize_example" and cuts off the excess part of the tensor.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns: word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids;
The same set of tensors as in the input, but with a corrected shape.
Additional Information:
"None" in some form-size tensors, for example "word_emb", means that this axis measurement can vary
from document to document.
"""
max_training_sentences = self.opt["max_training_sentences"]
num_sentences = word_emb.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences)
word_offset = text_len[:sentence_offset].sum()
        # A random contiguous window of max_training_sentences sentences is kept;
        # sentences before and after the window are discarded.
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
word_emb = word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
speaker_ids = speaker_ids[word_offset: word_offset + num_words]
assert len(gold_ends) == len(gold_starts)
        # The original element-wise loop stored int-cast values into float arrays;
        # astype realizes the intended integer conversion directly.
        gold_starts = gold_starts.astype(np.int64)
        gold_ends = gold_ends.astype(np.int64)
        # Keep gold spans that overlap the retained window and shift them by the word offset.
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
def get_mention_emb(self, text_emb, text_outputs, mention_starts, mention_ends):
"""
Forms a tensor that contains of embeddings of specific mentions.
Args:
            text_emb: tf.float64, [num_sentences, max_sentence_length, emb], text embeddings
text_outputs: tf.float64, [num_sentences, max_sentence_length, emb]
mention_starts: tf.int32, [Amount of mentions]
mention_ends: tf.int32, [Amount of mentions]
Returns: tf.float64, [num_mentions, emb]
Mentions embeddings tensor.
"""
mention_emb_list = []
mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
mention_emb_list.append(mention_start_emb)
mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_emb_list.append(mention_end_emb)
mention_width = 1 + mention_ends - mention_starts # [num_mentions]
if self.opt["use_features"]:
mention_width_index = mention_width - 1 # [num_mentions]
mention_width_emb = tf.gather(tf.get_variable("mention_width_embeddings", [self.opt["max_mention_width"],
self.opt["feature_size"]],
dtype=tf.float64),
mention_width_index) # [num_mentions, emb]
mention_width_emb = tf.nn.dropout(mention_width_emb, self.dropout)
mention_emb_list.append(mention_width_emb)
if self.opt["model_heads"]:
mention_indices = tf.expand_dims(tf.range(self.opt["max_mention_width"]), 0) + tf.expand_dims(
mention_starts, 1) # [num_mentions, max_mention_width]
mention_indices = tf.minimum(utils.shape(text_outputs, 0) - 1,
mention_indices) # [num_mentions, max_mention_width]
mention_text_emb = tf.gather(text_emb, mention_indices) # [num_mentions, max_mention_width, emb]
self.head_scores = utils.projection(text_outputs, 1) # [num_words, 1]
mention_head_scores = tf.gather(self.head_scores, mention_indices) # [num_mentions, max_mention_width, 1]
mention_mask = tf.expand_dims(
tf.sequence_mask(mention_width, self.opt["max_mention_width"], dtype=tf.float64),
2) # [num_mentions, max_mention_width, 1]
mention_attention = tf.nn.softmax(mention_head_scores + tf.log(mention_mask),
dim=1) # [num_mentions, max_mention_width, 1]
mention_head_emb = tf.reduce_sum(mention_attention * mention_text_emb, 1) # [num_mentions, emb]
mention_emb_list.append(mention_head_emb)
mention_emb = tf.concat(mention_emb_list, 1) # [num_mentions, emb]
return mention_emb
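    # Note on dimensionality, derived from the code above: with "use_features" and
    # "model_heads" both enabled, the mention embedding concatenates start output,
    # end output, width feature and head embedding, i.e.
    # emb = 2 * (2 * lstm_size) + feature_size + text_emb_dim.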
def get_mention_scores(self, mention_emb):
"""
        Passes the mention embeddings through a fully-connected network that
        computes the mention scores.
Args:
mention_emb: tf.float64, [num_mentions, emb], a tensor that contains of embeddings of specific mentions
Returns: [num_mentions, 1]
Output of the fully-connected network, that compute the mentions scores.
"""
with tf.variable_scope("mention_scores"):
return utils.ffnn(mention_emb, self.opt["ffnn_depth"], self.opt["ffnn_size"], 1,
self.dropout) # [num_mentions, 1]
def softmax_loss(self, antecedent_scores, antecedent_labels):
"""
        Computes the loss from antecedent_scores and antecedent_labels:
        a softmax cross-entropy marginalized over the gold antecedents.
Args:
antecedent_scores: tf.float64, [num_mentions, max_ant + 1], output of fully-connected network that compute
antecedent scores.
antecedent_labels: True labels for antecedent.
Returns: [num_mentions]
The value of loss function.
"""
gold_scores = antecedent_scores + tf.log(tf.cast(antecedent_labels, tf.float64)) # [num_mentions, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [num_mentions]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [num_mentions]
return log_norm - marginalized_gold_scores # [num_mentions]
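    # Worked illustration of the loss above (values are made up): with scores
    # [s0, s1, s2] and gold labels [False, True, True],
    #   loss = logsumexp([s0, s1, s2]) - logsumexp([s1, s2]),
    # i.e. the negative log of the probability mass assigned to gold antecedents.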
def get_antecedent_scores(self, mention_emb, mention_scores, antecedents, antecedents_len, mention_speaker_ids,
genre_emb):
"""
        Forms a pair representation from mention embeddings, mention scores and extra features,
        and passes it through a fully-connected network that computes antecedent scores.
Args:
mention_emb: [num_mentions, emb], a tensor that contains of embeddings of specific mentions
mention_scores: [num_mentions, 1], Output of the fully-connected network, that compute the mentions scores.
            antecedents: [num_mentions, max_ant], tf.int32, antecedent indices produced by the
                custom "antecedents" op loaded from coref_kernels.so
            antecedents_len: [num_mentions], tf.int32, number of valid antecedents per mention
mention_speaker_ids: [num_mentions, speaker_emb_size], tf.float64, Speaker IDs.
genre_emb: [genre_emb_size], tf.float64, Genre
Returns: tf.float64, [num_mentions, max_ant + 1], antecedent scores.
"""
num_mentions = utils.shape(mention_emb, 0)
max_antecedents = utils.shape(antecedents, 1)
feature_emb_list = []
if self.opt["use_metadata"]:
antecedent_speaker_ids = tf.gather(mention_speaker_ids, antecedents) # [num_mentions, max_ant]
same_speaker = tf.equal(tf.expand_dims(mention_speaker_ids, 1),
antecedent_speaker_ids) # [num_mentions, max_ant]
speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.opt["feature_size"]],
dtype=tf.float64),
tf.to_int32(same_speaker)) # [num_mentions, max_ant, emb]
feature_emb_list.append(speaker_pair_emb)
tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0),
[num_mentions, max_antecedents, 1]) # [num_mentions, max_ant, emb]
feature_emb_list.append(tiled_genre_emb)
if self.opt["use_features"]:
target_indices = tf.range(num_mentions) # [num_mentions]
mention_distance = tf.expand_dims(target_indices, 1) - antecedents # [num_mentions, max_ant]
mention_distance_bins = self.distance_bins(mention_distance) # [num_mentions, max_ant]
mention_distance_bins.set_shape([None, None])
mention_distance_emb = tf.gather(tf.get_variable("mention_distance_emb", [10, self.opt["feature_size"]],
dtype=tf.float64),
                                             mention_distance_bins)  # [num_mentions, max_ant, emb]
feature_emb_list.append(mention_distance_emb)
feature_emb = tf.concat(feature_emb_list, 2) # [num_mentions, max_ant, emb]
feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [num_mentions, max_ant, emb]
antecedent_emb = tf.gather(mention_emb, antecedents) # [num_mentions, max_ant, emb]
target_emb_tiled = tf.tile(tf.expand_dims(mention_emb, 1),
[1, max_antecedents, 1]) # [num_mentions, max_ant, emb]
similarity_emb = antecedent_emb * target_emb_tiled # [num_mentions, max_ant, emb]
pair_emb = tf.concat([target_emb_tiled, antecedent_emb, similarity_emb, feature_emb], 2)
# [num_mentions, max_ant, emb]
with tf.variable_scope("iteration"):
with tf.variable_scope("antecedent_scoring"):
antecedent_scores = utils.ffnn(pair_emb, self.opt["ffnn_depth"], self.opt["ffnn_size"], 1,
self.dropout) # [num_mentions, max_ant, 1]
antecedent_scores = tf.squeeze(antecedent_scores, 2) # [num_mentions, max_ant]
antecedent_mask = tf.log(
tf.sequence_mask(antecedents_len, max_antecedents, dtype=tf.float64)) # [num_mentions, max_ant]
antecedent_scores += antecedent_mask # [num_mentions, max_ant]
antecedent_scores += tf.expand_dims(mention_scores, 1) + tf.gather(mention_scores,
antecedents) # [num_mentions, max_ant]
antecedent_scores = tf.concat([tf.zeros([utils.shape(mention_scores, 0), 1], dtype=tf.float64),
antecedent_scores],
1) # [num_mentions, max_ant + 1]
return antecedent_scores # [num_mentions, max_ant + 1]
def flatten_emb_by_sentence(self, emb, text_len_mask):
"""
Create boolean mask for emb tensor.
Args:
emb: Some embeddings tensor with rank 2 or 3
text_len_mask: A mask tensor representing the first N positions of each row.
Returns: emb tensor after mask applications.
"""
num_sentences = tf.shape(emb)[0]
max_sentence_length = tf.shape(emb)[1]
emb_rank = len(emb.get_shape())
if emb_rank == 2:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
elif emb_rank == 3:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, utils.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, text_len_mask)
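    # Example (illustrative): with 2 sentences of max length 3 and true lengths [3, 1],
    # the mask keeps rows 0-2 and 3 of the flattened [6, emb] tensor, yielding [4, emb].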
def encode_sentences(self, text_emb, text_len, text_len_mask):
"""
Passes the input tensor through bi_LSTM.
Args:
text_emb: [num_sentences, max_sentence_length, emb], text code in tensor
text_len: tf.int32, [Amount of sentences]
text_len_mask: boolean mask for text_emb
Returns: [num_sentences, max_sentence_length, emb], output of bi-LSTM after boolean mask application
"""
num_sentences = tf.shape(text_emb)[0]
max_sentence_length = tf.shape(text_emb)[1]
# Transpose before and after for efficiency.
inputs = tf.transpose(text_emb, [1, 0, 2]) # [max_sentence_length, num_sentences, emb]
with tf.variable_scope("fw_cell"):
cell_fw = utils.CustomLSTMCell(self.opt["lstm_size"], num_sentences, self.dropout)
preprocessed_inputs_fw = cell_fw.preprocess_input(inputs)
with tf.variable_scope("bw_cell"):
cell_bw = utils.CustomLSTMCell(self.opt["lstm_size"], num_sentences, self.dropout)
preprocessed_inputs_bw = cell_bw.preprocess_input(inputs)
preprocessed_inputs_bw = tf.reverse_sequence(preprocessed_inputs_bw,
seq_lengths=text_len,
seq_dim=0,
batch_dim=1)
state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]),
tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]),
tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
with tf.variable_scope("lstm"):
with tf.variable_scope("fw_lstm"):
fw_outputs, fw_states = tf.nn.dynamic_rnn(cell=cell_fw,
inputs=preprocessed_inputs_fw,
sequence_length=text_len,
initial_state=state_fw,
time_major=True)
with tf.variable_scope("bw_lstm"):
bw_outputs, bw_states = tf.nn.dynamic_rnn(cell=cell_bw,
inputs=preprocessed_inputs_bw,
sequence_length=text_len,
initial_state=state_bw,
time_major=True)
bw_outputs = tf.reverse_sequence(bw_outputs,
seq_lengths=text_len,
seq_dim=0,
batch_dim=1)
text_outputs = tf.concat([fw_outputs, bw_outputs], 2)
text_outputs = tf.transpose(text_outputs, [1, 0, 2]) # [num_sentences, max_sentence_length, emb]
return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
def get_predicted_antecedents(self, antecedents, antecedent_scores):
"""
Forms a list of predicted antecedent labels
Args:
            antecedents: [num_mentions, max_ant], antecedent indices produced by the custom op
            antecedent_scores: [num_mentions, max_ant + 1], output of the fully-connected network
                that computes antecedent scores
Returns: a list of predicted antecedent labels
"""
predicted_antecedents = []
for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
if index < 0:
predicted_antecedents.append(-1)
else:
predicted_antecedents.append(antecedents[i, index])
return predicted_antecedents
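    # Example (illustrative): for a score row [0.0, -1.2, 2.3], np.argmax(...) - 1
    # gives 1, so antecedents[i, 1] is predicted; an argmax at column 0 (the dummy
    # "no antecedent" score) yields -1.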
def get_predictions_and_loss(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts,
gold_ends, cluster_ids):
"""
        Connects all elements of the network into one complete graph that computes mention spans
        independently, and passes through it the tensors that arrive at the input placeholders.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns:[candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
List of predictions and scores, and Loss function value
"""
self.dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["dropout_rate"])
self.lexical_dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["lexical_dropout_rate"])
num_sentences = tf.shape(word_emb)[0]
max_sentence_length = tf.shape(word_emb)[1]
text_emb_list = [word_emb]
if self.opt["char_embedding_size"] > 0:
            char_emb = tf.gather(
                tf.get_variable("char_embeddings", [len(self.char_dict), self.opt["char_embedding_size"]],
                                dtype=tf.float64),
                char_index)  # [num_sentences, max_sentence_length, max_word_length, emb]
flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, utils.shape(char_emb, 2),
utils.shape(char_emb, 3)])
# [num_sentences * max_sentence_length, max_word_length, emb]
flattened_aggregated_char_emb = utils.cnn(flattened_char_emb, self.opt["filter_widths"], self.opt[
"filter_size"]) # [num_sentences * max_sentence_length, emb]
aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb,
[num_sentences,
max_sentence_length,
utils.shape(flattened_aggregated_char_emb, 1)])
# [num_sentences, max_sentence_length, emb]
text_emb_list.append(aggregated_char_emb)
text_emb = tf.concat(text_emb_list, 2)
text_emb = tf.nn.dropout(text_emb, self.lexical_dropout)
text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length)
text_len_mask = tf.reshape(text_len_mask, [num_sentences * max_sentence_length])
text_outputs = self.encode_sentences(text_emb, text_len, text_len_mask)
text_outputs = tf.nn.dropout(text_outputs, self.dropout)
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.opt["feature_size"]],
dtype=tf.float64),
genre) # [emb]
sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
[1, max_sentence_length]) # [num_sentences, max_sentence_length]
flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
        flattened_text_emb = self.flatten_emb_by_sentence(text_emb, text_len_mask)  # [num_words, emb]
candidate_starts, candidate_ends = self.spans(
sentence_indices=flattened_sentence_indices,
max_width=self.max_mention_width)
candidate_starts.set_shape([None])
candidate_ends.set_shape([None])
candidate_mention_emb = self.get_mention_emb(flattened_text_emb, text_outputs, candidate_starts,
candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_mention_emb) # [num_mentions, 1]
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [num_mentions]
k = tf.to_int32(tf.floor(tf.to_float(tf.shape(text_outputs)[0]) * self.opt["mention_ratio"]))
predicted_mention_indices = self.extract_mentions(candidate_mention_scores, candidate_starts,
candidate_ends, k) # ([k], [k])
predicted_mention_indices.set_shape([None])
mention_starts = tf.gather(candidate_starts, predicted_mention_indices) # [num_mentions]
mention_ends = tf.gather(candidate_ends, predicted_mention_indices) # [num_mentions]
mention_emb = tf.gather(candidate_mention_emb, predicted_mention_indices) # [num_mentions, emb]
mention_scores = tf.gather(candidate_mention_scores, predicted_mention_indices) # [num_mentions]
# mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
# mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_speaker_ids = tf.gather(speaker_ids, mention_starts) # [num_mentions]
max_antecedents = self.opt["max_antecedents"]
antecedents, antecedent_labels, antecedents_len = self.get_antecedents(mention_starts, mention_ends,
gold_starts, gold_ends, cluster_ids,
max_antecedents)
        # ([num_mentions, max_ant], [num_mentions, max_ant + 1], [num_mentions])
antecedents.set_shape([None, None])
antecedent_labels.set_shape([None, None])
antecedents_len.set_shape([None])
antecedent_scores = self.get_antecedent_scores(mention_emb, mention_scores, antecedents, antecedents_len,
mention_speaker_ids, genre_emb) # [num_mentions, max_ant + 1]
loss = self.softmax_loss(antecedent_scores, antecedent_labels) # [num_mentions]
loss = tf.reduce_sum(loss) # []
return [candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
def get_predicted_clusters(self, mention_starts, mention_ends, predicted_antecedents):
"""
        Creates the list of predicted clusters (same format as in the observation dict) and a dict
        mapping each mention to the cluster it belongs to. Both are needed in inference mode and
        for annotating a new conll document that lacks the last (coreference) column.
Args:
mention_starts: tf.float64, [Amount of mentions]
mention_ends: tf.float64, [Amount of mentions]
predicted_antecedents: [len antecedent scores]
Returns:
            predicted_clusters: list of clusters, each a tuple of (start, end) mention spans
            mention_to_predicted: dict mapping each (start, end) mention span to its cluster
"""
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index
predicted_antecedent = (int(mention_starts[predicted_index]), int(mention_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(mention_starts[i]), int(mention_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = {m: predicted_clusters[i] for m, i in mention_to_predicted.items()}
return predicted_clusters, mention_to_predicted
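    # Example (illustrative): with mention_starts=[0, 4, 9], mention_ends=[1, 5, 10]
    # and predicted_antecedents=[-1, 0, 1], all three mentions chain into one cluster
    # ((0, 1), (4, 5), (9, 10)), and mention_to_predicted maps each span to it.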
def init_from_saved(self, saver):
"""
Load model from saved checkpoint.
Args:
saver: tf.saver
Returns: Nothing
"""
# checkpoint_path = join(self.log_root, self.opt['name'])
checkpoint_path = self.opt['model_file']
if os.path.isfile(join(checkpoint_path, "model.max.ckpt.meta")):
saver.restore(self.sess, join(checkpoint_path, "model.max.ckpt"))
else:
print('{0} not found'.format(checkpoint_path))
print('Init from scratch')
def shutdown(self):
"""Reset the model"""
tf.reset_default_graph()
def save(self, saver):
"""Save model checkpoint"""
# log_dir = self.log_root
# if isdir(log_dir):
# if isdir(join(log_dir, self.opt['name'])):
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt['name'])
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt["log_root"])
# if isdir(join(log_dir, self.opt['name'])):
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt['name'])
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# save in root folder
print('saving path ' + join(self.opt['model_file'], 'model.max.ckpt'))
saver.save(self.sess, join(self.opt['model_file'], 'model.max.ckpt'))
def train(self, batch):
"""
Run train operation on one batch/document
Args:
batch: list of tensors for placeholders, output of "tensorize_example" function
Returns: Loss functions value and tf.global_step
"""
self.start_enqueue_thread(batch, True)
self.tf_loss, tf_global_step, _ = self.sess.run([self.loss, self.global_step, self.train_op])
return self.tf_loss, tf_global_step
def predict(self, batch, out_file):
"""
        Makes predictions of new coreference clusters and writes them to a conll document.
Args:
batch: list of tensors for placeholders, output of "tensorize_example" function
out_file: original conll document
Returns: str with new conll document, with new coreference clusters
"""
self.start_enqueue_thread(batch, False)
if self.opt['train_on_gold']:
_, mention_starts, mention_ends, antecedents, antecedent_scores = self.sess.run(self.predictions)
else:
_, _, _, mention_starts, mention_ends, antecedents, antecedent_scores = self.sess.run(self.predictions)
predicted_antecedents = self.get_predicted_antecedents(antecedents, antecedent_scores)
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(mention_starts, mention_ends,
predicted_antecedents)
        new_clusters = dict()
        new_clusters[batch['doc_key']] = predicted_clusters
        outconll = utils.output_conll(out_file, new_clusters)
return outconll
def get_predictions_and_loss_on_gold(self, word_emb, char_index, text_len, speaker_ids, genre, is_training,
gold_starts, gold_ends, cluster_ids):
"""
        Connects all elements of the network into one complete graph that uses the gold mentions,
        and passes through it the tensors that arrive at the input placeholders.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns:[candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
List of predictions and scores, and Loss function value
"""
self.dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["dropout_rate"])
self.lexical_dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["lexical_dropout_rate"])
# assert gold_ends.shape == gold_starts.shape,\
# ('Amount of starts and ends of gold mentions are not equal: '
# 'Length of gold starts: {1}; Length of gold ends: {0}'.format(gold_ends.shape, gold_starts.shape))
num_sentences = tf.shape(word_emb)[0]
max_sentence_length = tf.shape(word_emb)[1]
text_emb_list = [word_emb]
if self.opt["char_embedding_size"] > 0:
char_emb = tf.gather(
tf.get_variable("char_embeddings", [len(self.char_dict), self.opt["char_embedding_size"]],
dtype=tf.float64),
char_index) # [num_sentences, max_sentence_length, max_word_length, emb]
flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, utils.shape(char_emb, 2),
utils.shape(char_emb,
3)])
# [num_sentences * max_sentence_length, max_word_length, emb]
flattened_aggregated_char_emb = utils.cnn(flattened_char_emb, self.opt["filter_widths"], self.opt[
"filter_size"]) # [num_sentences * max_sentence_length, emb]
aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb,
[num_sentences,
max_sentence_length,
utils.shape(flattened_aggregated_char_emb, 1)])
# [num_sentences, max_sentence_length, emb]
text_emb_list.append(aggregated_char_emb)
text_emb = tf.concat(text_emb_list, 2)
text_emb = tf.nn.dropout(text_emb, self.lexical_dropout)
text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length)
text_len_mask = tf.reshape(text_len_mask, [num_sentences * max_sentence_length])
text_outputs = self.encode_sentences(text_emb, text_len, text_len_mask)
text_outputs = tf.nn.dropout(text_outputs, self.dropout)
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.opt["feature_size"]],
dtype=tf.float64),
genre) # [emb]
# sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
# [1, max_sentence_length]) # [num_sentences, max_sentence_length]
# flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
        flattened_text_emb = self.flatten_emb_by_sentence(text_emb, text_len_mask)  # [num_words, emb]
candidate_mention_emb = self.get_mention_emb(flattened_text_emb, text_outputs, gold_starts,
gold_ends) # [num_candidates, emb]
# candidate_mention_scores = self.get_mention_scores(candidate_mention_emb) # [num_mentions, 1]
# candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [num_mentions]
gold_len = tf.shape(gold_ends)
candidate_mention_scores = tf.ones(gold_len, dtype=tf.float64)
mention_starts = gold_starts
mention_ends = gold_ends
mention_emb = candidate_mention_emb
mention_scores = candidate_mention_scores
# mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
# mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_speaker_ids = tf.gather(speaker_ids, mention_starts) # [num_mentions]
max_antecedents = self.opt["max_antecedents"]
antecedents, antecedent_labels, antecedents_len = self.get_antecedents(mention_starts, mention_ends,
gold_starts, gold_ends, cluster_ids,
max_antecedents)
        # ([num_mentions, max_ant], [num_mentions, max_ant + 1], [num_mentions])
antecedents.set_shape([None, None])
antecedent_labels.set_shape([None, None])
antecedents_len.set_shape([None])
antecedent_scores = self.get_antecedent_scores(mention_emb, mention_scores, antecedents, antecedents_len,
mention_speaker_ids, genre_emb) # [num_mentions, max_ant + 1]
loss = self.softmax_loss(tf.cast(antecedent_scores, tf.float64), antecedent_labels) # [num_mentions]
loss = tf.reduce_sum(loss) # []
return [candidate_mention_scores, mention_starts, mention_ends, antecedents, antecedent_scores], loss
| 51.750286 | 119 | 0.622028 | 44,256 | 0.979592 | 0 | 0 | 0 | 0 | 0 | 0 | 16,289 | 0.360552 |
ba1daa29383c186dc2ccf373341df64a1f615073 | 7,379 | py | Python | applications/plugins/RigidScale/examples/mapping/rigidScaleToRigidMultiMapping.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | applications/plugins/RigidScale/examples/mapping/rigidScaleToRigidMultiMapping.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | applications/plugins/RigidScale/examples/mapping/rigidScaleToRigidMultiMapping.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | import sys,os
import Sofa
import Flexible.IO
from Compliant import Tools as Tools
# main path
currentdir = os.path.dirname(os.path.realpath(__file__))+'/'
__file = __file__.replace('\\', '/') # windows compatible filename
source = './mesh/cube.obj'
target = './mesh/cube.obj'
voxel_size = 0.1
# ================================================================= #
# Method called in Sofa
# ================================================================= #
def createScene(root_node) :
# Required plugin
root_node.createObject('RequiredPlugin', pluginName='image')
root_node.createObject('RequiredPlugin', pluginName='Flexible')
root_node.createObject('RequiredPlugin', pluginName='Compliant')
root_node.createObject('RequiredPlugin', pluginName='RigidScale')
# Script launch
root_node.createObject('PythonScriptController', name='script', filename=__file, classname='MyClass')
# ================================================================= #
# Creation of the scene
# ================================================================= #
class MyClass(Sofa.PythonScriptController):
def createGraph(self, root):
# Variable
self.E_t = 0
self.E_t_dt = 0
self.root_node = root
# Sofa parameters
self.root_node.createObject('BackgroundSetting',color='1 1 1')
self.root_node.createObject('VisualStyle', displayFlags='showVisual hideWireframe showBehaviorModels hideForceFields hideInteractionForceFields')
self.root_node.createObject('StaticSolver')
self.root_node.createObject('CGLinearSolver', iterations=500, tolerance=1E-10, threshold=1E-10)
self.root_node.findData('gravity').value = '0 0 0'
# Object to transfer creation
node = self.root_node.createChild('cube_source')
node.createObject('MeshObjLoader',name='source', filename=source, triangulate=1, translation='0 0 0', rotation='0 0 0', scale3d='1 1 1')
node.createObject('MeshToImageEngine', template='ImageUC', name='rasterizer', src='@source', insideValue='1', voxelSize=voxel_size, padSize=0, rotateImage='false')
node.createObject('ImageContainer', template='ImageUC', name='image', src='@rasterizer', drawBB='false')
node.createObject('ImageSampler', template='ImageUC', name='sampler', src='@image', method=1, param='1 0', clearData=0)
node.createObject('MeshTopology', name='frame_topo', position='@sampler.position')
#================================ Target model ===================================
targetNode = node.createChild('target')
targetNode.createObject('MeshObjLoader', name='target', filename=target, triangulate=1, translation='2 0 0', rotation='0 90 45', scale3d='1.5 3 2', showObject=0)
targetNode.createObject('MechanicalObject', template='Vec3d', name='DOFs', src='@target', showObject=0)
targetNode.createObject('FixedConstraint', fixAll='1' )
targetVisuNode = targetNode.createChild('visu')
targetVisuNode.createObject('OglModel', template='ExtVec3f', name='visual', src='@../target', color='0.5 0.5 0.5 0.75')
#=================================== Scale =======================================
scaleNode = node.createChild('scale')
scaleNode.createObject('MechanicalObject', template='Vec3d', name='DOFs', position='1 1 1', showObject=0, showObjectScale='0.1')
#================================ Rigid frame ====================================
rigidNode = node.createChild('rigid')
rigidNode.createObject('MechanicalObject', template='Rigid3d', name='DOFs', src='@../frame_topo', showObject=0, showObjectScale='0.1')
#================== offsets mapped to both rigid and scale =======================
offsetNode = rigidNode.createChild('offset')
scaleNode.addChild(offsetNode)
offsetNode.createObject('MechanicalObject', template='Rigid3d', name='DOFs', position='0 1 0 0 0 0 1', showObject=1, showObjectScale='0.25')
offsetNode.createObject('RigidScaleToRigidMultiMapping', template='Rigid,Vec3d,Rigid', input1='@../../rigid/DOFs', input2='@../../scale/DOFs', output='@.', index='0 0 0', printLog='0')
#============================= Registration model ================================
objMainNode = rigidNode.createChild('main')
scaleNode.addChild(objMainNode)
# scene creation
loader = objMainNode.createObject('MeshObjLoader',name='source', filename=source, triangulate=1, translation='0 0 0', rotation='0 0 0', scale3d='1 1 1')
objMainNode.createObject('MeshToImageEngine', template='ImageUC', name='rasterizer', src='@source', value=1, insideValue=1, voxelSize=voxel_size, padSize=0, rotateImage='false')
objMainNode.createObject('ImageContainer', template='ImageUC', name='image', src='@rasterizer', drawBB='false')
objMainNode.createObject('MechanicalObject', template='Affine', name='parent', src='@../../frame_topo', showObject=1, showObjectScale='0.1')
objMainNode.createObject('RigidScaleToAffineMultiMapping', template='Rigid,Vec3d,Affine', input1='@../../rigid/DOFs', input2='@../../scale/DOFs', output='@.', index='0 0 0', printLog='0')
objMainNode.createObject('VoronoiShapeFunction', template='ShapeFunctiond,ImageUC', name='SF', position='@parent.rest_position', image='@image.image', transform='@image.transform', nbRef=4, clearData=1, bias=0)
# Contact
objContactNode = objMainNode.createChild('registration')
objContactNode.createObject('MeshTopology', name='topo', src='@../source')
objContactNode.createObject('MechanicalObject', name='DOFs')
objContactNode.createObject('UniformMass', totalMass=1)
objContactNode.createObject('TriangleModel')
objContactNode.createObject('LinearMapping', template='Affine,Vec3d')
# Visual model
objVisuNode = objContactNode.createChild('visual')
objVisuNode.createObject('OglModel', template='ExtVec3f', name='visual', src='@../topo', color='1 0.2 0.2 0.9')
objVisuNode.createObject('IdentityMapping', template='Vec3d,ExtVec3f')
# Registration
objRegistrationNode = objMainNode.createChild('force')
objRegistrationNode.createObject('MechanicalObject', template='Vec3d', name='DOFs', src='@../source')
objRegistrationNode.createObject('LinearMapping', template='Affine,Vec3d')
# registration force field
springs = ""
for i in range(len(loader.position)):
springs += str(i)+' '+str(i)+' '
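        # e.g. with 3 loaded vertices the pairs string is "0 0 1 1 2 2 ", pairing each
        # source vertex with the target vertex of the same index.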
distanceNode = objRegistrationNode.createChild('registration_constraint')
targetNode.addChild(distanceNode)
distanceNode.createObject('MechanicalObject', template='Vec3d', name='distanceDOFs')
distanceNode.createObject('DifferenceMultiMapping', template='Vec3d,Vec3d', input='@'+Tools.node_path_rel(distanceNode, targetNode)+' @'+Tools.node_path_rel(distanceNode, objRegistrationNode), output='@.', pairs=springs, showObjectScale="0.005")
distanceNode.createObject('UniformCompliance', name='constraint', isCompliance=0, compliance=1E-6, damping=0.1) | 68.324074 | 254 | 0.623933 | 6,270 | 0.849709 | 0 | 0 | 0 | 0 | 0 | 0 | 2,929 | 0.396937 |
ba1e60f8480ebcf3873159f3b4490651fbabea8f | 1,447 | py | Python | peptidereactor/iFeature/codes/TA.py | spaenigs/peptidereactor | 17efcb993505934f5b9c2d63f5cc040bb244dde9 | [
"MIT"
] | 3 | 2021-02-03T12:30:37.000Z | 2021-06-07T07:03:38.000Z | peptidereactor/iFeature/codes/TA.py | spaenigs/peptidereactor | 17efcb993505934f5b9c2d63f5cc040bb244dde9 | [
"MIT"
] | 1 | 2021-01-04T14:52:27.000Z | 2021-01-04T14:52:27.000Z | peptidereactor/iFeature/codes/TA.py | spaenigs/peptidereactor | 17efcb993505934f5b9c2d63f5cc040bb244dde9 | [
"MIT"
] | 1 | 2021-06-09T16:16:16.000Z | 2021-06-09T16:16:16.000Z | #!/usr/bin/env python
#_*_coding:utf-8_*_
import sys, os, re
pPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(pPath)
import checkFasta
def TA(fastas, **kw):
	if not checkFasta.checkFasta(fastas):
print('Error: for "TA" encoding, the input fasta sequences should be with equal length. \n\n')
return 0
encodings = []
header = ['#']
for p in range(1, len(fastas[0][1])+1):
header.append('TA.F' + str(p) + '.phi')
header.append('TA.F' + str(p) + '.psi')
encodings.append(header)
disDir = kw['path']
	if disDir is None:
print('Error: please specify the directory of predicted protein TA file by "--path"')
return 0
for i in fastas:
name, sequence = i[0], i[1]
code = [name]
		if not os.path.exists(disDir + '/' + name + '.spXout'):
print('Error: the predicted TA information file (.spXout) for protein ' + name + ' does not exist.')
return 0
with open(disDir + '/' + name + '.spXout') as f:
records = f.readlines()[1:]
proteinSeq = ''
asaValue = []
for line in records:
array = line.strip().split() if line.strip() != '' else None
proteinSeq = proteinSeq + array[1]
asaValue.append(array[3:5])
pos = proteinSeq.find(sequence)
if pos == -1:
print('Warning: could not find the peptide in proteins.\n\n')
else:
for p in range(pos, pos+len(sequence)):
code.append(asaValue[p][0])
code.append(asaValue[p][1])
encodings.append(code)
return encodings
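# Example call (illustrative names and paths): TA expects fastas as [[name, sequence], ...]
# plus the directory of predicted torsion-angle files passed via the "path" keyword:
#   encodings = TA([["P12345", "MKVL"]], path="/data/spXout")
# which requires /data/spXout/P12345.spXout to exist.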
| 28.372549 | 103 | 0.645473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.278507 |
ba1fa0ba40676d80533fb64114e6150ae8ea61f6 | 3,714 | py | Python | services/mx/service.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | services/mx/service.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | services/mx/service.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | #!./bin/python
# ----------------------------------------------------------------------
# mx service
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Dict
# Third-party modules
import orjson
# NOC modules
from noc.core.service.fastapi import FastAPIService
from noc.core.mx import MX_STREAM
from noc.config import config
from noc.core.liftbridge.message import Message
from noc.core.mx import MX_SHARDING_KEY
from noc.services.mx.router.router import Router
from noc.services.mx.router.action import DROP
from noc.core.perf import metrics
class MXService(FastAPIService):
name = "mx"
use_mongo = True
if config.features.traefik:
traefik_backend = "mx"
traefik_frontend_rule = "PathPrefix:/api/mx"
def __init__(self):
super().__init__()
self.slot_number = 0
self.total_slots = 0
self.router = Router()
self.stream_partitions: Dict[str, int] = {}
async def on_activate(self):
self.router.load()
self.slot_number, self.total_slots = await self.acquire_slot()
await self.subscribe_stream(MX_STREAM, self.slot_number, self.on_message, async_cursor=True)
async def on_message(self, msg: Message) -> None:
metrics["messages"] += 1
# Apply routes
self.logger.debug("[%d] Receiving message %s", msg.offset, msg.headers)
for route in self.router.iter_route(msg):
metrics["route_hits"] += 1
self.logger.debug("[%d] Applying route %s", msg.offset, route.name)
# Apply actions
routed: bool = False
for stream, action_headers in route.iter_action(msg):
metrics["action_hits"] += 1
# Fameless drop
if stream == DROP:
metrics["action_drops"] += 1
self.logger.debug("[%s] Dropped. Stopping processing", msg.offset)
return
# Build resulting headers
headers = {}
headers.update(msg.headers)
if action_headers:
headers.update(action_headers)
# Determine sharding channel
sharding_key = int(headers.get(MX_SHARDING_KEY, b"0"))
partitions = self.stream_partitions.get(stream)
if not partitions:
# Request amount of partitions
partitions = await self.get_stream_partitions(stream)
self.stream_partitions[stream] = partitions
partition = sharding_key % partitions
# Single message may be transmuted in zero or more messages
for body in route.iter_transmute(headers, msg.value):
if not isinstance(body, bytes):
# Transmute converts message to an arbitrary structure,
# so convert back to the json
body = orjson.dumps(body)
metrics[("forwards", "%s:%s" % (stream, partition))] += 1
self.logger.debug("[%s] Routing to %s:%s", msg.offset, stream, partition)
self.publish(value=body, stream=stream, partition=partition, headers=headers)
routed = True
if not routed:
self.logger.debug("[%d] Not routed", msg.offset)
metrics["route_misses"] += 1
self.logger.debug("[%s] Finish processing", msg.offset)
if __name__ == "__main__":
MXService().start()
| 39.935484 | 100 | 0.558697 | 2,915 | 0.784868 | 0 | 0 | 0 | 0 | 2,524 | 0.679591 | 900 | 0.242326 |
ba20ee15679f38813c36a98fec465c7fe081a8b2 | 4,993 | py | Python | rectifier/config/config_parser.py | SectorLabs/heroku-rectifier | 46094b168efe59837ea6f11386f27bf406b17bf0 | [
"MIT"
] | 7 | 2019-04-05T07:46:31.000Z | 2022-01-19T20:39:19.000Z | rectifier/config/config_parser.py | SectorLabs/heroku-rectifier | 46094b168efe59837ea6f11386f27bf406b17bf0 | [
"MIT"
] | 9 | 2019-05-18T08:49:37.000Z | 2021-04-01T06:23:58.000Z | rectifier/config/config_parser.py | SectorLabs/heroku-rectifier | 46094b168efe59837ea6f11386f27bf406b17bf0 | [
"MIT"
] | null | null | null | import json
from json import JSONDecodeError
from typing import Dict
import jsonschema
import structlog
import schemas
from rectifier.config import Config, AppConfig, QueueConfig, CoordinatorConfig, AppMode
from rectifier.storage.storage import Storage
from rectifier import settings
LOGGER = structlog.get_logger(__name__)
class ConfigReadError(RuntimeError):
"""Error that occurs while reading
the configuration."""
pass
class ConfigParser:
"""A utility class for parsing and validating configuration."""
def __init__(self, storage: Storage) -> None:
"""Takes the configuration from the storage, and tries to parse it."""
storage_config = storage.get(settings.REDIS_CONFIG_KEY)
config_dict = None
if storage_config is None:
LOGGER.info('No configuration found in the storage.')
else:
try:
config_dict = json.loads(storage_config)
except (JSONDecodeError, TypeError):
LOGGER.info(
'Failed to parse the storage configuration.', config=storage_config
)
self.raw_config = config_dict
self.config = ConfigParser.from_dict(config_dict) if config_dict else None
LOGGER.info('Using configuration:', config=self.config)
@classmethod
def from_dict(cls, data: Dict) -> Config:
"""Reads the configuration from the specified dictionary."""
cls.validate(data)
apps = dict()
for (app, config) in data.items():
queues = dict()
mode = AppMode(config.get('mode', AppMode.SCALE.value))
for (queue_name, queue_properties) in cls._queue_configs(config):
queues[queue_name] = QueueConfig(
queue_name=queue_name, **queue_properties
)
apps[app] = AppConfig(queues=queues, mode=mode)
return Config(coordinator_config=CoordinatorConfig(apps=apps))
@classmethod
def validate(cls, data: Dict) -> None:
"""Validates the specified raw configuration.
Raises:
ConfigReadError:
When the configuration does not match
the schema.
"""
try:
jsonschema.validate(data, schemas.Config.SCHEMA)
except jsonschema.ValidationError as err:
message = 'Failed to validate configuration.'
LOGGER.error(message, config=data, err=err)
raise ConfigReadError(message) from err
for (app, config) in data.items():
app_mode = config.get('mode', AppMode.SCALE.value)
try:
AppMode(app_mode)
except ValueError as err:
message = f'Improper value for app mode: {app_mode}. Possible values: {[mode.value for mode in AppMode]}'
raise ConfigReadError(message) from err
for (queue_name, queue_properties) in cls._queue_configs(config):
intervals = queue_properties['intervals']
workers = queue_properties['workers']
cooldown = queue_properties['cooldown']
if len(intervals) != len(workers):
message = 'The length of the intervals array should match the length of the workers array.'
LOGGER.error(message, queue_name=queue_name)
raise ConfigReadError(message)
if intervals[0] != 0:
message = 'The first interval should start with 0.'
LOGGER.error(message, intervals=intervals, queue_name=queue_name)
raise ConfigReadError(message)
if any([interval < 0 for interval in intervals]):
message = (
'The entries in the message intervals should all be positive.'
)
LOGGER.error(message, intervals=intervals, queue_name=queue_name)
raise ConfigReadError(message)
if any([worker < 0 for worker in workers]):
message = (
'The entries in the workers count array should all be positive.'
)
LOGGER.error(message, workers=workers, queue_name=queue_name)
raise ConfigReadError(message)
if cooldown < 0:
message = 'The cooldown should be positive.'
LOGGER.error(message, cooldown=cooldown, queue_name=queue_name)
raise ConfigReadError(message)
if sorted(intervals) != intervals:
message = 'The intervals should be sorted in ascending order.'
LOGGER.error(message, intervals=intervals, queue_name=queue_name)
raise ConfigReadError(message)
@staticmethod
def _queue_configs(app_config: Dict):
return ((k, v) for (k, v) in app_config.items() if k != 'mode')
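# Illustrative shape of a configuration this parser accepts (queue field names are taken
# from the checks in validate(); the authoritative schema lives in schemas.Config.SCHEMA,
# and the "mode" value must be a member of AppMode -- "scale" here is an assumption):
# {
#     "my-app": {
#         "mode": "scale",
#         "my-queue": {"intervals": [0, 100, 500], "workers": [1, 2, 4], "cooldown": 30}
#     }
# }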
| 37.825758 | 121 | 0.597036 | 4,660 | 0.933307 | 0 | 0 | 3,662 | 0.733427 | 0 | 0 | 1,056 | 0.211496 |
ba213c7916b1147b628e29019f58a21312675a83 | 8,433 | py | Python | market.py | hype-ecosystem/predictions_bot | 82925ed4faed736cf95d42751c7d02e00a9c819c | [
"MIT"
] | 2 | 2020-01-24T11:36:24.000Z | 2021-02-19T00:11:42.000Z | market.py | hype-ecosystem/predictions_bot | 82925ed4faed736cf95d42751c7d02e00a9c819c | [
"MIT"
] | null | null | null | market.py | hype-ecosystem/predictions_bot | 82925ed4faed736cf95d42751c7d02e00a9c819c | [
"MIT"
] | 1 | 2020-06-09T13:24:10.000Z | 2020-06-09T13:24:10.000Z | import subprocess as sp
import datetime
import sys
import re
import os
import pwd
import logging
import logging.handlers
import bitfinex_api
from dbmanager import DatabaseManager
from plot_provider import PlotProvider
import queue
class Market:
def __init__(self, path, symbol, message_queue):
self._message_queue = message_queue
# Configure logger
self._logger = logging.getLogger(f"{symbol}_MarketLogger")
self._logger.setLevel(logging.ERROR)
handler = logging.handlers.SysLogHandler(address='/dev/log')
self._logger.addHandler(handler)
self._plotProvider = PlotProvider()
# Path structure:
# path
# - genotick/
# - genotick.jar
# - <market_name>/
# - config.txt
# - data/
# - <market_symbol>.csv
# - robots/
# - robot files
self._path = os.path.abspath(path)
self._symbol = symbol
self._db = DatabaseManager()
self._genotick_path = fr"{self._path}/genotick/genotick.jar"
self._data_path = fr"{self._path}/{self._symbol}/data/{self._symbol}.csv"
self._reverse_data_path = fr"{self._path}/{self._symbol}/data/reverse_{self._symbol}.csv"
self._gen_config_path = fr"{self._path}/{self._symbol}/config.txt"
self._robots_path = fr"{self._path}/robots"
def genotick_predict_and_train(self):
try:
ts_prediction_start = self._db.get_last_predictions_ts(self._symbol)
ts_history_start = self._db.get_last_history_ts(self._symbol) * 1000
if(ts_prediction_start is None):
ts_prediction_start = ts_history_start
else:
ts_prediction_start *= 1000
ts_history_start += 60 * 60 * 1000
print("Collecting history data...")
history = bitfinex_api.append_1h_history(
ts_history_start, self._symbol, self._data_path)
print("Adding data to database...")
self._db.append_market_history(history, self._symbol)
print("Configuring genotick for prediction...")
self._configure_genotick_prediction(ts_prediction_start)
print("Creating reverse data file...")
self._make_reverse_data_file()
print("Running genotick for prediction...")
predictions = self._parse_prediction_output(self._genotick_predict())
if len(predictions) == 0:
self._logger.info(f"No predictions for market {self._symbol}")
return
print("Queuing predictions and plot to bot queue...")
self._enqueue_predictions(predictions)
#self._enqueue_market_plot()
print("Updating predictions in database...")
self._db.update_predictions(predictions, self._symbol)
print("Configuring genotick for training...")
self._configure_genotick_training(ts_history_start)
print("Running genotick for training...")
self._genotick_train()
except Exception:
self._logger.exception(f"Failed to predict and train with genotick for market {self._symbol}")
def _get_custom_env(self):
result = os.environ.copy()
result["GENOTICK_LOG_FILE"] = f"{self._symbol}_genotick_log.txt"
return result
def _genotick_predict(self):
command = ["java",
"-jar",
self._genotick_path,
f"input=file:{self._gen_config_path}"]
cp = sp.run(command, env=self._get_custom_env(), universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE)
if cp.returncode != 0:
raise RuntimeError(f"Failed to run genotick in prediction mode for market {self._symbol}.", cp.stdout, cp.stderr)
return cp.stdout
def _parse_prediction_output(self, output):
pattern = re.compile(
fr"^[\w\/\s]+\/{self._symbol}\.[\sa-z]+(\d+)[a-z\s]+\:\s(OUT|UP|DOWN)$", re.MULTILINE)
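        # A genotick output line this pattern is meant to match looks roughly like
        # (illustrative wording; only the timestamp and the UP/DOWN/OUT verdict are captured):
        #   data/tBTCUSD. prediction at 1612137600000 for the time horizon: UP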
items = pattern.findall(output)
# Add one hour for predictions timestamp
predictions = list()
for item in items:
predictions.append((int(item[0])/1000 + 60 * 60, item[1]))
return predictions
def _enqueue_predictions(self, predictions):
for p in predictions:
ts = datetime.datetime.utcfromtimestamp(int(p[0])).strftime('%Y-%m-%d %H:%M:%S')
message = f"{ts} {self._symbol[1:]} {p[1]}"
self._message_queue.put({'type': 'text', 'data': message})
def _enqueue_market_plot(self):
data = self._db.get_24h_plot_data(self._symbol)
image = self._plotProvider.get_market_24plot(data, self._symbol[1:])
self._message_queue.put({'type': 'image', 'data': image})
def _remove_old_reverse_data_file(self):
command = ["rm", "-f", self._reverse_data_path]
cp = sp.run(command, universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE)
if cp.returncode != 0:
raise RuntimeError(f"Failed to remove reverse data file for market {self._symbol}.", cp.stdout, cp.stderr)
def _make_reverse_data_file(self):
self._remove_old_reverse_data_file()
command = ["java",
"-jar",
self._genotick_path,
f"reverse={self._data_path}"]
cp = sp.run(command, env=self._get_custom_env(), universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE)
if cp.returncode != 0:
raise RuntimeError(f"Genotick failed to create reverse data file for market {self._symbol}. ", cp.stdout, cp.stderr)
def _configure_genotick_prediction(self, start):
command = ["sed",
"-i",
"-e",
r"s:\([#\s]*\)\(performTraining\s\+\)\(.\+\):\2false:",
"-e",
fr"s:\([#\s]*\)\(startTimePoint\s\+\)\(.\+\):\2{start}:",
"-e",
r"s/^[^#]*endTimePoint/#&/",
self._gen_config_path]
cp = sp.run(command, universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE)
if cp.returncode != 0:
raise RuntimeError(f"Failed to configure genotick for prediction for market {self._symbol}.", cp.stdout, cp.stderr)
def _configure_genotick_training(self, start):
command = ["sed",
"-i",
"-e",
r"s:\([#\s]*\)\(performTraining\s\+\)\(.\+\):\2true:",
"-e",
fr"s:\([#\s]*\)\(startTimePoint\s\+\)\(.\+\):\2{start}:",
"-e",
r"s/^[^#]*endTimePoint/#&/",
self._gen_config_path]
cp = sp.run(command, universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE)
if cp.returncode != 0:
raise RuntimeError(f"Failed to configure genotick for training for market {self._symbol}.", cp.stdout, cp.stderr)
def _genotick_train(self):
command = ["java",
"-jar",
self._genotick_path,
f"input=file:{self._gen_config_path}"]
with sp.Popen(command, env=self._get_custom_env(), universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE) as proc:
pid = proc.pid
try:
outs, errs = proc.communicate(timeout=(45 * 60))
            except sp.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
raise RuntimeError(f"Failed to run genotick in training mode for market {self._symbol}. Error: {outs}. {errs}")
newRobotsPath = f"savedPopulation_{pid}"
#print(fr"New population path for market {self._symbol} is {newRobotsPath}")
command = ["rm", "-f", "-r", self._robots_path, "&&", "mv", newRobotsPath, self._robots_path]
cp = sp.run(command, universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE)
if cp.returncode != 0:
raise RuntimeError(f"Failed to move new robots for market {self._symbol}.", cp.stdout, cp.stderr)
def main(argv):
usage = "usage: {} market_symbol market_path".format(argv[0])
if len(argv) != 3:
print(usage)
sys.exit(1)
market = Market(argv[2], argv[1], queue.Queue())
market.genotick_predict_and_train()
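# Example invocation (script name, symbol, and path hypothetical):
#   python market.py BTCUSD /data/markets/BTCUSD
# argv[1] is the market symbol and argv[2] the market path; Market appears to take (path, symbol, queue).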
if __name__ == "__main__":
main(sys.argv)
| 43.694301 | 128 | 0.5903 | 7,911 | 0.9381 | 0 | 0 | 0 | 0 | 0 | 0 | 2,280 | 0.270366 |
ba21d65b7d1cc8f2d7ea73d9ecfaf83200df9067 | 709 | py | Python | launch/gl_ros2_driver_udp.py | soslab-project/gl_ros2_driver_udp | 40382327a433a5feff4f2bb09d6228129eaa6ee3 | [
"BSD-3-Clause"
] | 3 | 2021-09-15T04:54:08.000Z | 2021-12-21T06:47:40.000Z | launch/gl_ros2_driver_udp.py | soslab-project/gl_ros2_driver_udp | 40382327a433a5feff4f2bb09d6228129eaa6ee3 | [
"BSD-3-Clause"
] | 1 | 2021-10-15T08:55:47.000Z | 2021-10-15T08:55:47.000Z | launch/gl_ros2_driver_udp.py | soslab-project/gl_ros2_driver_udp | 40382327a433a5feff4f2bb09d6228129eaa6ee3 | [
"BSD-3-Clause"
] | null | null | null | import os
import launch
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
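    # "node_name"/"node_executable" are the pre-Foxy launch_ros argument names;
    # ROS 2 Foxy and later expect "name"/"executable" instead.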
gl_ros2_driver_udp = Node(
node_name = 'gl_ros2_driver_udp',
package = 'gl_ros2_driver_udp',
node_executable = 'gl_ros2_driver_udp_node',
output = 'screen',
parameters = [
{'gl_ip': '10.110.1.2'},
{'gl_port': 2000},
{'pc_port': 3000},
{'frame_id': 'laser'},
{'pub_topicname_lidar': 'scan'},
{'angle_offset': 0.0},
],
)
ld = launch.LaunchDescription()
ld.add_action( gl_ros2_driver_udp )
return ld
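# Usage sketch (assuming the package installs this file with its launch files):
#   ros2 launch gl_ros2_driver_udp gl_ros2_driver_udp.py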
| 25.321429 | 67 | 0.599436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.236953 |
ba245a0af638dbd063878dec00e0c87df1fbc19a | 1,367 | py | Python | Analysis Scripts/sum_individual.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | 1 | 2019-07-07T19:54:17.000Z | 2019-07-07T19:54:17.000Z | Analysis Scripts/sum_individual.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | null | null | null | Analysis Scripts/sum_individual.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | null | null | null | # HASPR - High-Altitude Solar Power Research
# Script to calculate annual sums given generation profiles
# Version 0.1
# Author: neyring
import haspr
from haspr import Result
from numpy import genfromtxt
from os import walk
import numpy as np
# PARAMETERS #
# path to directory containing generation profiles to sum individually:
inputDirectory = "D:\\00_Results\\02_Generation Profiles\\Case 1 - Flat" \
"\\0 Individual Expected Output - per m2"
# directory to write output to:
haspr.outputDirectory = "D:\\00_Results\\Out"
# OS path delimiter ("\\" for windows, "/" for unix)"
haspr.osPathDelimiter = "\\"
# cycle through files and build result array:
result_array = []
generation_file_names = []
for (dirpath, dirnames, filenames) in walk(inputDirectory):
    generation_file_names.extend(filenames)
    break  # only top-level files are wanted; paths below are joined against inputDirectory directly
for f in generation_file_names:
path = inputDirectory + haspr.osPathDelimiter + f
extracted_data = genfromtxt(path, delimiter=',', skip_header=1)
gen_values = extracted_data[:, 1]
summed = np.sum(gen_values)
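    # file names are assumed to look like "<prefix> <profile name> ...", so the second token identifies the profile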
split = f.split(" ")
result_str = split[1] + "," + str(summed)
result_array.append(result_str)
# build result:
result = Result("Individual Sums")
result.payload.append("Profile,Total Output per m2")
for r in result_array:
result.payload.append(r)
result.dump()
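# Hypothetical output written by result.dump() (values illustrative only):
#   Profile,Total Output per m2
#   site_042,1432.7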
| 32.547619 | 75 | 0.705194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 548 | 0.400878 |