code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
from django.contrib.auth import get_user_model
from rest_framework.fields import CharField
from rest_framework.serializers import ModelSerializer
from grandchallenge.challenges.models import Challenge
from grandchallenge.components.serializers import (
ComponentInterfaceValueSerializer,
)
from grandchallenge.evaluation.models import (
Evaluation,
Phase,
Submission,
)
class UserSerializer(ModelSerializer):
    """Read-only representation of a user, exposing only the username."""

    class Meta:
        model = get_user_model()
        fields = ("username",)
class ChallengeSerializer(ModelSerializer):
    """Serializes the identifying fields of a challenge."""

    class Meta:
        model = Challenge
        fields = ("title", "short_name")
class PhaseSerializer(ModelSerializer):
    """Serializes a phase together with its nested parent challenge."""

    challenge = ChallengeSerializer()

    class Meta:
        model = Phase
        fields = ("challenge", "title", "slug")
class SubmissionSerializer(ModelSerializer):
    """Serializes a submission, nesting its phase and its creator."""

    phase = PhaseSerializer()
    creator = UserSerializer()

    class Meta:
        model = Submission
        fields = (
            "pk",
            "phase",
            "created",
            "creator",
            "comment",
            "predictions_file",
            "supplementary_file",
            "supplementary_url",
        )
class EvaluationSerializer(ModelSerializer):
    """Serializes an evaluation with its submission, outputs and ranking."""

    submission = SubmissionSerializer()
    outputs = ComponentInterfaceValueSerializer(many=True)
    # Expose the human-readable status label, not the stored enum value.
    status = CharField(source="get_status_display", read_only=True)
    title = CharField(read_only=True)

    class Meta:
        model = Evaluation
        fields = (
            "pk",
            "method",
            "submission",
            "created",
            "published",
            "outputs",
            "rank",
            "rank_score",
            "rank_per_metric",
            "status",
            "title",
        )
| comic/comic-django | app/grandchallenge/evaluation/serializers.py | Python | apache-2.0 | 1,856 |
import gzip
import logging
import os
import shutil
import uuid
from PIL import Image
from zipfile import ZipFile
import pandas as pd
import numpy as np
import pyresample as pr
from trollvalidation.validations import configuration as cfg
LOG = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG,
# format='[%(levelname)s: %(asctime)s: %(name)s] %(message)s',
# datefmt='%Y-%m-%d %H:%M:%S')
class TmpFiles(object):
    """Tracks temporary file paths so they can all be removed in one call."""

    def __init__(self, files=None):
        """Start tracking *files* (a single path or a list of paths).

        The original used a mutable default argument (``files=[]``); that
        single shared list would be mutated by ``append`` and leak entries
        between instances, so ``None`` is used as the default instead.
        """
        super(TmpFiles, self).__init__()
        if files is None:
            files = []
        if isinstance(files, list):
            self.tmpfiles = files
        else:
            self.tmpfiles = [files]

    def append(self, files):
        """Add one path or a list of paths to the tracked set."""
        if isinstance(files, list):
            self.tmpfiles += files
        else:
            self.tmpfiles.append(files)

    def cleanup(self):
        """Delete every tracked file from disk.

        Fixes two bugs in the original: it referenced the non-existent
        attribute ``self.files`` (AttributeError), and it relied on ``map``
        for its side effect, which is lazy (a no-op) on Python 3.
        """
        for path in self.tmpfiles:
            os.remove(path)
def cleanup(_, tmp_files):
    """Remove the given temporary paths: plain files first, then directories."""
    for path in tmp_files:
        if os.path.isfile(path):
            LOG.info("Cleaning up... {0}".format(path))
            os.remove(path)
    # Whatever still exists after the first pass is a directory tree.
    for path in tmp_files:
        if os.path.exists(path):
            LOG.info("Cleaning up... {0}".format(path))
            shutil.rmtree(path)
def write_to_csv(results, description_str=''):
    """Write validation results to ``<OUTPUT_DIR>/<description>_results.csv``.

    :param results: iterable of result rows; falsy entries (e.g. ``None``)
        are dropped so they cannot block the CSV writing.
    :param description_str: prefix for the output file name.
    """
    # Materialize as a list: on Python 3 ``filter`` returns a lazy iterator,
    # which is always truthy (so the emptiness check below would be wrong)
    # and cannot be consumed twice.
    results = [r for r in results if r]
    if results:
        # Index by the first element of each row; the original used
        # ``zip(*results)[0]``, which is not subscriptable on Python 3.
        index = [r[0] for r in results]
        if cfg.CSV_HEADER:
            df = pd.DataFrame(results, index=index, columns=cfg.CSV_HEADER)
        else:
            df = pd.DataFrame(results, index=index)
        df.to_csv(os.path.join(cfg.OUTPUT_DIR, '{0}_results.csv'.format(
            description_str)))
def get_area_def(file_handle):
    """
    This function is a utility function to read the area definition
    corresponding to an ice concentration product.

    :param file_handle: str
        Path to an ice concentration product in NetCDF product.
    :return: AreaDefinition
        The parsed area definition corresponding to the projection
        and area extent of the product.
    """
    file_name = os.path.basename(file_handle)
    # Ordered (substring, config id) pairs; the first match wins.
    patterns = (
        ('NH25kmEASE2', 'EASE2_NH'),
        ('SH25kmEASE2', 'EASE2_SH'),
        ('nh_ease-125', 'EASE_NH'),
        ('sh_ease-125', 'EASE_SH'),
        ('nh_ease2-250', 'EASE2_NH'),
        ('sh_ease2-250', 'EASE2_SH'),
        ('nic_weekly_', 'NIC_EASE_NH'),
        ('nh_polstere-100', 'OSISAF_NH'),
        ('sh_polstere-100', 'OSISAF_SH'),
        # TODO: Add a 'NIC_EASE_SH' pattern once that dataset is available.
    )
    for token, cfg_id in patterns:
        if token in file_name:
            return pr.utils.parse_area_file('etc/areas.cfg', cfg_id)[0]
    raise ValueError('No matching region for file {0}'.format(file_handle))
def uncompress(compressed_file, target=cfg.TMP_DIR):
    """
    This function is a utility function to uncompress NetCDF files in
    case they are given that way.

    :param compressed_file: str
        Path to a zipped ice concentration product.
    :param target: str
        Directory into which zip archives are unpacked.
    :return: tuple(str, list)
        The path of the uncompressed file, plus a list of temporary
        folders created (empty unless a zip archive was unpacked).
    """
    unpacked_filename, extension = os.path.splitext(compressed_file)
    if extension == '.gz':
        LOG.info('Unpacking {0}'.format(compressed_file))
        if not os.path.isfile(unpacked_filename):
            with gzip.open(compressed_file, 'rb') as packed_file:
                with open(unpacked_filename, 'wb') as unpacked_file:
                    unpacked_file.write(packed_file.read())
        # The gzipped original is deliberately kept on disk.
        # os.remove(compressed_file)
        return unpacked_filename, []
    elif extension == '.zip':
        LOG.info('Unpacking {0}'.format(compressed_file))
        tmp_id = str(uuid.uuid4())
        temporary_files_folder = os.path.join(target, tmp_id)
        # Initialized so a zip without a .shp member fails loudly below
        # instead of raising NameError at the return (as the original did).
        unpacked_shapefile = None
        with open(compressed_file, 'rb') as packed_file:
            with ZipFile(packed_file) as z:
                for name in z.namelist():
                    if name.endswith('.shp'):
                        unpacked_shapefile = os.path.join(
                            temporary_files_folder, name)
                    try:
                        z.extract(name, temporary_files_folder)
                    # 'except Exception, e' is a SyntaxError on Python 3;
                    # the 'as' form works on Python 2.6+ as well.
                    except Exception as e:
                        LOG.exception(e)
                        LOG.error('Could not uncompress {0}'.format(name))
        if unpacked_shapefile is None:
            raise ValueError(
                'No shapefile found in {0}'.format(compressed_file))
        return unpacked_shapefile, [temporary_files_folder]
    else:
        return compressed_file, []
def dump_data(ref_time, eval_data, orig_data, orig_file):
    """Persist evaluation and original arrays as bitmaps and pickle dumps."""
    base_name = os.path.basename(orig_file)
    # Southern hemisphere products carry an '_sh_'/'_SH_' marker in the name.
    hemisphere = 'SH' if ('_sh_' in base_name or '_SH_' in base_name) else 'NH'

    out_path = os.path.join(cfg.OUTPUT_DIR, ref_time)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    for tag, data in (('eval_data', eval_data), ('orig_data', orig_data)):
        img = Image.fromarray(data.astype(np.uint8))
        fname = os.path.join(out_path, '{0}_{1}_{2}.bmp'.format(
            cfg.VALIDATION_ID, hemisphere, tag))
        img.save(fname)
        data.dump(fname.replace('.bmp', '.pkl'))
| HelgeDMI/trollvalidation | trollvalidation/validation_utils.py | Python | apache-2.0 | 5,738 |
#!/usr/bin/env python
# DummyMP - Multiprocessing Library for Dummies!
# Copyright 2014 Albert Huang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# DummyMP Library - Logging Redirect Handler
# multiprocessing library for dummies!
# (library for easily running functions in parallel)
#
import logging
import config
import os
class DummyMPLogHandler(logging.Handler):
    """DummyMP logging handler to allow multiprocess logging.

    This class is a custom logging handler to allow spawned processes
    (from :py:mod:`multiprocessing`) to log without any issues. This
    works by intercepting emitted log records, and sending them via
    queue to the master process. The master process will process each
    record and call :py:meth:`logging.Logger.handle` to emit the
    logging record at the master process level.

    Note that this class can be used as a general multiprocess logging
    handler simply by removing the int_pid attribute.

    Attributes:
        queue (:py:class:`multiprocessing.Queue`): The Queue object to
            forward logging records to.
        int_pid (int): The internal PID used to reference the process.
    """

    def __init__(self, int_pid, queue):
        """Initializes DummyMPLogHandler with the inputted internal PID
        and Queue object."""
        logging.Handler.__init__(self)
        self.queue = queue
        self.int_pid = int_pid

    def emit(self, record):
        """Method override to forward logging records to the internal
        Queue object."""
        try:
            # Format: [ [queueMsgID, PID, internal PID], record ]
            self.queue.put([[config.DUMMYMP_LOG_ID, os.getpid(), self.int_pid], record])
        except Exception:
            # Something went wrong... Note: a bare ``except:`` (as the
            # original had) would also swallow KeyboardInterrupt and
            # SystemExit; only genuine errors should reach handleError().
            self.handleError(record)
| alberthdev/pyradmon | pyradmon/dummymp/loghandler.py | Python | apache-2.0 | 2,314 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that uses the feed to populate
dynamic data.
"""
from datetime import datetime
from uuid import uuid4
# Import appropriate classes from the client library.
from googleads import adwords
from googleads import errors
# Unique feed name per run, so repeated runs do not collide on the account.
FEED_NAME = 'Interplanetary Feed Name %s' % uuid4()
# Placeholders: replace with real ad group IDs before running the example.
ADGROUPS = [
    'INSERT_ADGROUP_ID_1_HERE',
    'INSERT_ADGROUP_ID_2_HERE'
]
def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):
  """Creates ExpandedTextAds that use ad customizations for specified AdGroups.

  Args:
    client: an AdWordsClient instance.
    adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
    feed_name: the name of the feed used to apply customizations.

  Raises:
    GoogleAdsError: if no ExpandedTextAds were added.
  """
  adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809')

  # Placeholders like {=feed.Name} are filled in at serving time from the
  # customizer feed, so the same creative shows per-ad-group values.
  expanded_text_ad = {
      'xsi_type': 'ExpandedTextAd',
      'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name,
      'headlinePart2': 'Only {=%s.Price}' % feed_name,
      'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name,
      'finalUrls': ['http://www.example.com'],
  }

  # The same ad is added to every ad group; each will serve different
  # values because it matches different feed items.
  operations = []
  for adgroup_id in adgroup_ids:
    operations.append({
        'operator': 'ADD',
        'operand': {
            'adGroupId': adgroup_id,
            'ad': expanded_text_ad
        }
    })

  response = adgroup_ad_service.mutate(operations)

  if not response or 'value' not in response:
    raise errors.GoogleAdsError('No ads were added.')
  for ad in response['value']:
    print('Created an ad with ID "%s", type "%s", and status "%s".'
          % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
def CreateCustomizerFeed(client, feed_name):
  """Creates a new AdCustomizerFeed.

  Args:
    client: an AdWordsClient instance.
    feed_name: the name for the new AdCustomizerFeed.

  Returns:
    The new AdCustomizerFeed.

  Raises:
    GoogleAdsError: if no feed was added.
  """
  # Get the AdCustomizerFeedService
  ad_customizer_feed_service = client.GetService('AdCustomizerFeedService',
                                                 'v201809')
  # The feed carries one attribute per ad placeholder: Name, Price, Date.
  customizer_feed = {
      'feedName': feed_name,
      'feedAttributes': [
          {'type': 'STRING', 'name': 'Name'},
          {'type': 'STRING', 'name': 'Price'},
          {'type': 'DATE_TIME', 'name': 'Date'}
      ]
  }
  feed_service_operation = {
      'operator': 'ADD',
      'operand': customizer_feed
  }
  response = ad_customizer_feed_service.mutate([feed_service_operation])
  if response and 'value' in response:
    feed = response['value'][0]
    feed_data = {
        'feedId': feed['feedId'],
        'nameId': feed['feedAttributes'][0]['id'],
        'priceId': feed['feedAttributes'][1]['id'],
        'dateId': feed['feedAttributes'][2]['id']
    }
    # Bug fix: the original wrote ``print('...') % args``, applying '%'
    # to print()'s None return value -- a TypeError on Python 3. Format
    # inside the call instead (also fixes the missing space before 'ID').
    print(('Feed with name "%s" and ID %s was added with:\n'
           '\tName attribute ID %s and price attribute ID %s and date '
           'attribute ID %s') % (feed['feedName'], feed['feedId'],
                                 feed_data['nameId'], feed_data['priceId'],
                                 feed_data['dateId']))
    return feed
  else:
    raise errors.GoogleAdsError('No feeds were added')
def RestrictFeedItemToAdGroup(client, feed_item, adgroup_id):
  """Restricts the feed item to an ad group.

  Args:
    client: an AdWordsClient instance.
    feed_item: The feed item.
    adgroup_id: The ad group ID.
  """
  feed_item_target_service = client.GetService(
      'FeedItemTargetService', 'v201809')

  # Restrict the feed item to only serve with ads for the given ad group.
  operation = {
      'operator': 'ADD',
      'operand': {
          'xsi_type': 'FeedItemAdGroupTarget',
          'feedId': feed_item['feedId'],
          'feedItemId': feed_item['feedItemId'],
          'adGroupId': adgroup_id
      }
  }

  new_ad_group_target = feed_item_target_service.mutate([operation])['value'][0]
  print('Feed item target for feed ID %s and feed item ID %s was created to '
        'restrict serving to ad group ID %s' %
        (new_ad_group_target['feedId'],
         new_ad_group_target['feedItemId'],
         new_ad_group_target['adGroupId']))
def CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed):
  """Creates FeedItems for the specified AdGroups.

  These FeedItems contain values to use in ad customizations for the AdGroups.

  Args:
    client: an AdWordsClient instance.
    adgroup_ids: a list containing two AdGroup Ids.
    ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
        with.

  Raises:
    GoogleAdsError: if no FeedItems were added.
  """
  feed_item_service = client.GetService('FeedItemService', 'v201809')

  now = datetime.now()
  time_format = '%Y%m%d %H%M%S'
  # Two sample trips, dated on the 1st and 15th of the current month.
  trips = (
      ('Mars', '$1234.56', datetime(now.year, now.month, 1, 0, 0)),
      ('Venus', '$1450.00', datetime(now.year, now.month, 15, 0, 0)),
  )
  feed_item_operations = [
      CreateFeedItemAddOperation(name, price, date.strftime(time_format),
                                 ad_customizer_feed)
      for name, price, date in trips
  ]

  response = feed_item_service.mutate(feed_item_operations)
  if 'value' not in response:
    raise errors.GoogleAdsError('No FeedItems were added.')
  for feed_item in response['value']:
    print('Added FeedItem with ID %d.' % feed_item['feedItemId'])

  # Pair each created feed item with its ad group, in order.
  for feed_item, adgroup_id in zip(response['value'], adgroup_ids):
    RestrictFeedItemToAdGroup(client, feed_item, adgroup_id)
def CreateFeedItemAddOperation(name, price, date, ad_customizer_feed):
  """Creates a FeedItemOperation.

  The generated FeedItemOperation will create a FeedItem with the specified
  values when sent to FeedItemService.mutate.

  Args:
    name: the value for the name attribute of the FeedItem.
    price: the value for the price attribute of the FeedItem.
    date: the value for the date attribute of the FeedItem.
    ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
        with.

  Returns:
    A new FeedItemOperation for adding a FeedItem.
  """
  attributes = ad_customizer_feed['feedAttributes']
  # Pair each attribute id with its value, in declaration order:
  # Name, Price, Date.
  attribute_values = [
      {'feedAttributeId': attributes[0]['id'], 'stringValue': name},
      {'feedAttributeId': attributes[1]['id'], 'stringValue': price},
      {'feedAttributeId': attributes[2]['id'], 'stringValue': date},
  ]
  return {
      'operator': 'ADD',
      'operand': {
          'feedId': ad_customizer_feed['feedId'],
          'attributeValues': attribute_values
      }
  }
def main(client, adgroup_ids, feed_name=FEED_NAME):
  """Runs the example: create the feed, populate it, then create the ads."""
  # One customizer feed per account can back all customized ads.
  customizer_feed = CreateCustomizerFeed(client, feed_name)
  # Fill the feed with the values the ads will reference.
  CreateCustomizerFeedItems(client, adgroup_ids, customizer_feed)
  # Finally create ads whose placeholders pull from the feed.
  CreateAdsWithCustomizations(client, adgroup_ids, feed_name)
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage reads credentials from the googleads.yaml config file.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, ADGROUPS)
| googleads/googleads-python-lib | examples/adwords/v201809/advanced_operations/add_ad_customizer.py | Python | apache-2.0 | 8,193 |
import socket
import threading
# Listen on all interfaces at this fixed port.
bind_ip = ""
bind_port = 60007
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
# Allow a backlog of up to 5 pending connections.
server.listen(5)
print("[*] Listening on %s:%d" % (bind_ip, bind_port))
def handle_client(client_socket):
    """Serve one client: read a single request, reply with "ACK!", close."""
    # A single read of up to 1 KiB is treated as the whole request.
    request = client_socket.recv(1024).decode()
    print("[*] Received: %s" % request)
    client_socket.send("ACK!".encode())
    print(client_socket.getpeername())
    client_socket.close()
while True:
    # Block until a client connects, then hand it off to a worker thread.
    client, addr = server.accept()
    print("[*] Accepted connect from: %s:%d" % (addr[0], addr[1]))
    # One thread per connection; handle_client closes the socket when done.
    client_handler = threading.Thread(target=handle_client, args=(client,))
    client_handler.start()
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
"""
command line application and sample code for granting access to a secret.
"""
import argparse
# [START secretmanager_iam_grant_access]
def iam_grant_access(project_id, secret_id, member):
    """
    Grant the given member access to a secret.
    """
    # Import the Secret Manager client library.
    from google.cloud import secretmanager

    # Create the client and build the secret's resource name.
    client = secretmanager.SecretManagerServiceClient()
    secret_name = client.secret_path(project_id, secret_id)

    # Fetch the current IAM policy, grant the accessor role to the member,
    # and write the modified policy back.
    policy = client.get_iam_policy(request={"resource": secret_name})
    policy.bindings.add(
        role="roles/secretmanager.secretAccessor", members=[member]
    )
    updated_policy = client.set_iam_policy(
        request={"resource": secret_name, "policy": policy}
    )

    print("Updated IAM policy on {}".format(secret_id))
    # [END secretmanager_iam_grant_access]
    return updated_policy
if __name__ == "__main__":
    # Parse CLI arguments and delegate to the sample function above.
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("project_id", help="id of the GCP project")
    parser.add_argument("secret_id", help="id of the secret to get")
    parser.add_argument("member", help="member to grant access")
    args = parser.parse_args()
    iam_grant_access(args.project_id, args.secret_id, args.member)
| googleapis/python-secret-manager | samples/snippets/iam_grant_access.py | Python | apache-2.0 | 2,075 |
from akara.dist import setup
# Minimal Akara extension package: a single extension module, blah.py.
setup(name="basic",
      version="1.0",
      akara_extensions=["blah.py"]
      )
| uogbuji/akara | test/setup_scripts/setup_basic.py | Python | apache-2.0 | 108 |
"""
*** Deprecated Model Builder functionality ***
Supports functional algorithm calls found in the top-level h2o module.
Current modeling is performed via estimator fitting (see estimators sub module for details)
"""
from connection import H2OConnection
from frame import H2OFrame
from job import H2OJob
import h2o
from model.model_future import H2OModelFuture
def supervised_model_build(x=None, y=None, vx=None, vy=None, algo="",
                           offsets=None, weights=None, fold_column=None,
                           kwargs=None):
    """Validate supervised-learning inputs, then delegate to _model_build."""
    # Autoencoders are unsupervised: a response must not be supplied.
    is_auto_encoder = kwargs is not None and kwargs.get("autoencoder") is not None
    if is_auto_encoder:
        if y is not None:
            raise ValueError("y should not be specified for autoencoder.")
    elif y is None:
        raise ValueError("Missing response")
    if vx is not None and vy is None:
        raise ValueError("Missing response validating a supervised model")
    return _model_build(x, y, vx, vy, algo, offsets, weights, fold_column, kwargs)
def supervised(kwargs):
    """Resolve frames/columns from kwargs and build a supervised model."""
    train = kwargs["training_frame"]
    valid = kwargs["validation_frame"]
    x = _frame_helper(kwargs["x"], train)
    y = _frame_helper(kwargs["y"], train)
    vx = _frame_helper(kwargs["validation_x"], valid)
    vy = _frame_helper(kwargs["validation_y"], valid)
    offsets = _ow("offset_column", kwargs)
    weights = _ow("weights_column", kwargs)
    fold_column = _ow("fold_column", kwargs)
    algo = kwargs["algo"]
    # Forward everything else; validation_frame is kept even when None.
    skip = ("x", "y", "validation_x", "validation_y", "algo")
    parms = {k: v for k, v in kwargs.items()
             if (k not in skip and v is not None) or k == "validation_frame"}
    return supervised_model_build(x, y, vx, vy, algo, offsets, weights,
                                  fold_column, parms)
def unsupervised_model_build(x,validation_x,algo_url,kwargs): return _model_build(x,None,validation_x,None,algo_url,None,None,None,kwargs)
def unsupervised(kwargs):
    """Resolve frames from kwargs and build an unsupervised model."""
    # No response column for unsupervised algorithms.
    x = _frame_helper(kwargs["x"], kwargs["training_frame"])
    vx = _frame_helper(kwargs["validation_x"], kwargs["validation_frame"])
    algo = kwargs["algo"]
    parms = {k: v for k, v in kwargs.items()
             if k not in ("x", "validation_x", "algo") and v is not None}
    return unsupervised_model_build(x, vx, algo, parms)
def _frame_helper(col, fr):
    """Resolve *col* to a column of *fr*, unless it is already an H2OFrame."""
    if col is None:
        return None
    if isinstance(col, H2OFrame):
        return col
    if fr is None:
        raise ValueError("Missing training_frame")
    return fr[col]
def _ow(name, kwargs):
    """Resolve an offsets/weights/fold column and record its name in kwargs."""
    col = kwargs[name]
    fr = kwargs["training_frame"]
    if col is None or isinstance(col, H2OFrame):
        res = col
    else:
        if fr is None:
            raise ValueError("offsets/weights/fold given, but missing training_frame")
        res = fr[col]
    kwargs[name] = None if res is None else res.names[0]
    # Any special column must also be available in the validation frame.
    if res is not None and kwargs["validation_x"] is not None \
            and kwargs["validation_frame"] is None:
        raise ValueError("offsets/weights/fold given, but missing validation_frame")
    return res
def _check_frame(x,y,response): # y and response are only ever different for validation
if x is None: return None
if y is not None:
x[response.names[0]] = y
return x
def _check_col(x, vx, vfr, col):
    """Attach column *col* to both the training and validation frames."""
    x = _check_frame(x, col, col)
    if vfr is None:
        vx = None
    else:
        vcol = vfr[col.names[0]]
        vx = _check_frame(vx, vcol, vcol)
    return x, vx
def _model_build(x, y, vx, vy, algo, offsets, weights, fold_column, kwargs):
    """Send a ModelBuilders REST request and return the resulting model.

    NOTE(review): mutates both the frames (response/special columns are
    attached in place) and ``kwargs`` (frame ids and column names filled in).
    Returns an H2OModelFuture when kwargs contains a truthy "do_future",
    otherwise blocks until the build finishes.
    """
    if x is None: raise ValueError("Missing features")
    # Fold the response into the training and validation frames.
    x = _check_frame(x, y, y)
    vx = _check_frame(vx, vy, y)
    # Attach offsets/weights/fold columns to both frames when present.
    if offsets is not None: x, vx = _check_col(x, vx, kwargs["validation_frame"], offsets)
    if weights is not None: x, vx = _check_col(x, vx, kwargs["validation_frame"], weights)
    if fold_column is not None: x, vx = _check_col(x, vx, kwargs["validation_frame"], fold_column)
    # Replace frame/column objects with the ids/names the REST API expects.
    kwargs['training_frame'] = x.frame_id
    if vx is not None: kwargs['validation_frame'] = vx.frame_id
    if y is not None: kwargs['response_column'] = y.names[0]
    # Drop None-valued parameters; stringify any remaining H2OFrame values.
    kwargs = dict([(k, kwargs[k].frame_id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if kwargs[k] is not None])
    do_future = kwargs.pop("do_future") if "do_future" in kwargs else False
    future_model = H2OModelFuture(H2OJob(H2OConnection.post_json("ModelBuilders/"+algo, **kwargs), job_type=(algo+" Model Build")), x)
    return future_model if do_future else _resolve_model(future_model, **kwargs)
def _resolve_model(future_model, **kwargs):
    """Block until the build job finishes, then fetch the finished model."""
    future_model.poll()
    return h2o.get_model(future_model.job.dest_key)
| madmax983/h2o-3 | h2o-py/h2o/h2o_model_builder.py | Python | apache-2.0 | 4,406 |
# coding=utf-8
import os
def run(cmd, decoding=None, clean=False):
    """Run a shell command and return its output lines.

    Args:
        cmd: command string, executed via ``os.popen``.
        decoding: optional codec name; output lines that are ``bytes`` are
            decoded with it (text-mode lines are left as-is).
        clean: when True, strip surrounding whitespace and drop empty lines.

    Returns:
        list: the (optionally cleaned) non-empty output lines.

    Raises:
        ValueError: if *cmd* is empty.
    """
    # Guard clause instead of wrapping the whole body in an if-block.
    if not cmd:
        raise ValueError('Command is Empty')
    result = []
    for line in os.popen(cmd).readlines():
        # os.popen returns str on Python 3; the original unconditionally
        # called line.decode(), which raises AttributeError on str.
        if decoding and isinstance(line, bytes):
            line = line.decode(decoding)
        if clean:
            line = line.strip()
        if line:
            result.append(line)
    return result
| interhui/py-sys | py_sys/execute.py | Python | apache-2.0 | 477 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
from pants.core.util_rules import config_files, source_files
from pants.core.util_rules.external_tool import rules as external_tool_rules
from pants.engine.fs import FileDigest
from pants.engine.internals.scheduler import ExecutionError
from pants.jvm.resolve.coursier_fetch import (
CoursierLockfileEntry,
CoursierResolvedLockfile,
MavenCoord,
MavenCoordinates,
MavenRequirements,
ResolvedClasspathEntry,
)
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.coursier_setup import rules as coursier_setup_rules
from pants.jvm.target_types import JvmDependencyLockfile
from pants.jvm.util_rules import ExtractFileDigest
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Builds a RuleRunner wired with all rules needed for Coursier fetches."""
    return RuleRunner(
        rules=[
            *config_files.rules(),
            *coursier_fetch_rules(),
            *coursier_setup_rules(),
            *external_tool_rules(),
            *source_files.rules(),
            *util_rules(),
            # Entry points the tests below request directly.
            QueryRule(CoursierResolvedLockfile, (MavenRequirements,)),
            QueryRule(ResolvedClasspathEntry, (CoursierLockfileEntry,)),
            QueryRule(FileDigest, (ExtractFileDigest,)),
        ],
        target_types=[JvmDependencyLockfile],
    )
def test_empty_resolve(rule_runner: RuleRunner) -> None:
    """Resolving an empty requirement set must yield an empty lockfile."""
    resolved_lockfile = rule_runner.request(
        CoursierResolvedLockfile,
        [
            MavenRequirements.create_from_maven_coordinates_fields(
                fields=(),
            )
        ],
    )
    assert resolved_lockfile == CoursierResolvedLockfile(entries=())
# TODO(#11928): Make all of these tests more hermetic and not dependent on having a network connection.
def test_resolve_with_no_deps(rule_runner: RuleRunner) -> None:
    """A single artifact without transitive deps resolves to one pinned entry.

    NOTE(review): depends on network access to Maven Central and on the
    published hamcrest-core 1.3 jar bytes staying stable.
    """
    resolved_lockfile = rule_runner.request(
        CoursierResolvedLockfile,
        [
            MavenRequirements.create_from_maven_coordinates_fields(
                fields=(),
                additional_requirements=["org.hamcrest:hamcrest-core:1.3"],
            )
        ],
    )
    assert resolved_lockfile == CoursierResolvedLockfile(
        entries=(
            CoursierLockfileEntry(
                coord=MavenCoord(coord="org.hamcrest:hamcrest-core:1.3"),
                file_name="hamcrest-core-1.3.jar",
                direct_dependencies=MavenCoordinates([]),
                dependencies=MavenCoordinates([]),
                file_digest=FileDigest(
                    fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
                    serialized_bytes_length=45024,
                ),
            ),
        )
    )
def test_resolve_with_transitive_deps(rule_runner: RuleRunner) -> None:
    """junit pulls in hamcrest-core; both must appear, with deps recorded."""
    resolved_lockfile = rule_runner.request(
        CoursierResolvedLockfile,
        [
            MavenRequirements.create_from_maven_coordinates_fields(
                fields=(),
                additional_requirements=["junit:junit:4.13.2"],
            )
        ],
    )
    assert resolved_lockfile == CoursierResolvedLockfile(
        entries=(
            CoursierLockfileEntry(
                coord=MavenCoord(coord="junit:junit:4.13.2"),
                file_name="junit-4.13.2.jar",
                direct_dependencies=MavenCoordinates(
                    [MavenCoord(coord="org.hamcrest:hamcrest-core:1.3")]
                ),
                dependencies=MavenCoordinates([MavenCoord(coord="org.hamcrest:hamcrest-core:1.3")]),
                file_digest=FileDigest(
                    fingerprint="8e495b634469d64fb8acfa3495a065cbacc8a0fff55ce1e31007be4c16dc57d3",
                    serialized_bytes_length=384581,
                ),
            ),
            CoursierLockfileEntry(
                coord=MavenCoord(coord="org.hamcrest:hamcrest-core:1.3"),
                file_name="hamcrest-core-1.3.jar",
                direct_dependencies=MavenCoordinates([]),
                dependencies=MavenCoordinates([]),
                file_digest=FileDigest(
                    fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
                    serialized_bytes_length=45024,
                ),
            ),
        )
    )
def test_resolve_with_inexact_coord(rule_runner: RuleRunner) -> None:
    """An open-ended coordinate ("4.8+") resolves to an exact, pinned entry."""
    resolved_lockfile = rule_runner.request(
        CoursierResolvedLockfile,
        [
            MavenRequirements.create_from_maven_coordinates_fields(
                fields=(),
                # Note the open-ended coordinate here. We will still resolve this for the user, but the result
                # will be exact and pinned. As noted above, this is an especially brittle unit test, but version
                # 4.8 was chosen because it has multiple patch versions and no new versions have been uploaded
                # to 4.8.x in over a decade.
                additional_requirements=["junit:junit:4.8+"],
            )
        ],
    )
    assert resolved_lockfile == CoursierResolvedLockfile(
        entries=(
            CoursierLockfileEntry(
                coord=MavenCoord(coord="junit:junit:4.8.2"),
                file_name="junit-4.8.2.jar",
                direct_dependencies=MavenCoordinates([]),
                dependencies=MavenCoordinates([]),
                file_digest=FileDigest(
                    fingerprint="a2aa2c3bb2b72da76c3e6a71531f1eefdc350494819baf2b1d80d7146e020f9e",
                    serialized_bytes_length=237344,
                ),
            ),
        )
    )
def test_fetch_one_coord_with_no_deps(rule_runner: RuleRunner) -> None:
    """Fetching a lockfile entry yields a classpath entry with matching digest."""
    classpath_entry = rule_runner.request(
        ResolvedClasspathEntry,
        [
            CoursierLockfileEntry(
                coord=MavenCoord(coord="org.hamcrest:hamcrest-core:1.3"),
                file_name="hamcrest-core-1.3.jar",
                direct_dependencies=MavenCoordinates([]),
                dependencies=MavenCoordinates([]),
                file_digest=FileDigest(
                    fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
                    serialized_bytes_length=45024,
                ),
            )
        ],
    )
    assert classpath_entry.coord == MavenCoord(coord="org.hamcrest:hamcrest-core:1.3")
    assert classpath_entry.file_name == "hamcrest-core-1.3.jar"
    # Verify the actual fetched file bytes, not just the metadata.
    file_digest = rule_runner.request(
        FileDigest, [ExtractFileDigest(classpath_entry.digest, "hamcrest-core-1.3.jar")]
    )
    assert file_digest == FileDigest(
        fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
        serialized_bytes_length=45024,
    )
def test_fetch_one_coord_with_transitive_deps(rule_runner: RuleRunner) -> None:
    """Fetching an entry that has dependencies still yields just that jar."""
    classpath_entry = rule_runner.request(
        ResolvedClasspathEntry,
        [
            CoursierLockfileEntry(
                coord=MavenCoord(coord="junit:junit:4.13.2"),
                file_name="junit-4.13.2.jar",
                direct_dependencies=MavenCoordinates(
                    [MavenCoord(coord="org.hamcrest:hamcrest-core:1.3")]
                ),
                dependencies=MavenCoordinates([MavenCoord(coord="org.hamcrest:hamcrest-core:1.3")]),
                file_digest=FileDigest(
                    fingerprint="8e495b634469d64fb8acfa3495a065cbacc8a0fff55ce1e31007be4c16dc57d3",
                    serialized_bytes_length=384581,
                ),
            )
        ],
    )
    assert classpath_entry.coord == MavenCoord(coord="junit:junit:4.13.2")
    assert classpath_entry.file_name == "junit-4.13.2.jar"
    # Verify the actual fetched file bytes, not just the metadata.
    file_digest = rule_runner.request(
        FileDigest, [ExtractFileDigest(classpath_entry.digest, "junit-4.13.2.jar")]
    )
    assert file_digest == FileDigest(
        fingerprint="8e495b634469d64fb8acfa3495a065cbacc8a0fff55ce1e31007be4c16dc57d3",
        serialized_bytes_length=384581,
    )
def test_fetch_one_coord_with_bad_fingerprint(rule_runner: RuleRunner) -> None:
    """A lockfile fingerprint mismatch must fail the fetch with CoursierError."""
    expected_exception_msg = (
        r".*?CoursierError:.*?Coursier fetch for .*?hamcrest.*? succeeded.*?"
        r"66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9.*?"
        r"did not match.*?ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
    )
    lockfile_entry = CoursierLockfileEntry(
        coord=MavenCoord(coord="org.hamcrest:hamcrest-core:1.3"),
        file_name="hamcrest-core-1.3.jar",
        direct_dependencies=MavenCoordinates([]),
        dependencies=MavenCoordinates([]),
        # Deliberately wrong fingerprint (all 'f's) to trigger the failure.
        file_digest=FileDigest(
            fingerprint="ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
            serialized_bytes_length=45024,
        ),
    )
    with pytest.raises(ExecutionError, match=expected_exception_msg):
        rule_runner.request(ResolvedClasspathEntry, [lockfile_entry])
def test_fetch_one_coord_with_bad_length(rule_runner: RuleRunner) -> None:
    """A lockfile byte-length mismatch must fail the fetch with CoursierError."""
    expected_exception_msg = (
        r".*?CoursierError:.*?Coursier fetch for .*?hamcrest.*? succeeded.*?"
        r"66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9.*?"
        r"serialized_bytes_length=45024.*?"
        r"did not match.*?66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9.*?"
        r"serialized_bytes_length=1\).*?"
    )
    lockfile_entry = CoursierLockfileEntry(
        coord=MavenCoord(coord="org.hamcrest:hamcrest-core:1.3"),
        file_name="hamcrest-core-1.3.jar",
        direct_dependencies=MavenCoordinates([]),
        dependencies=MavenCoordinates([]),
        # Correct fingerprint but deliberately wrong byte length (1).
        file_digest=FileDigest(
            fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
            serialized_bytes_length=1,
        ),
    )
    with pytest.raises(ExecutionError, match=expected_exception_msg):
        rule_runner.request(ResolvedClasspathEntry, [lockfile_entry])
def test_fetch_one_coord_with_mismatched_coord(rule_runner: RuleRunner) -> None:
    """Verify that fetch_one_coord rejects inexact coordinate strings.

    The jar itself downloads fine, but the lockfile entry's coordinate
    ("...:1.3+") is not an exact string match for the exact coordinate that
    Coursier reports back ("...:1.3"), so the fetch must fail.

    In practice this cannot happen: lockfile entries are derived from Coursier
    reports, which always contain exact coordinate strings.
    """
    inexact_entry = CoursierLockfileEntry(
        coord=MavenCoord(coord="org.hamcrest:hamcrest-core:1.3+"),
        file_name="hamcrest-core-1.3.jar",
        direct_dependencies=MavenCoordinates([]),
        dependencies=MavenCoordinates([]),
        file_digest=FileDigest(
            fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
            serialized_bytes_length=45024,
        ),
    )
    err_pattern = (
        r'Coursier resolved coord.*?"org.hamcrest:hamcrest-core:1.3".*?'
        r'does not match requested coord.*?"org.hamcrest:hamcrest-core:1.3\+".*?'
    )
    with pytest.raises(ExecutionError, match=err_pattern):
        rule_runner.request(ResolvedClasspathEntry, [inexact_entry])
| benjyw/pants | src/python/pants/jvm/resolve/coursier_fetch_integration_test.py | Python | apache-2.0 | 11,431 |
"""
Support for HomeMatic devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/homematic/
"""
import asyncio
from datetime import timedelta
from functools import partial
import logging
import os
import socket
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_USERNAME, CONF_PASSWORD, CONF_PLATFORM,
CONF_HOSTS, CONF_HOST, ATTR_ENTITY_ID, STATE_UNKNOWN)
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
REQUIREMENTS = ['pyhomematic==0.1.36']
DOMAIN = 'homematic'
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL_HUB = timedelta(seconds=300)
SCAN_INTERVAL_VARIABLES = timedelta(seconds=30)
DISCOVER_SWITCHES = 'homematic.switch'
DISCOVER_LIGHTS = 'homematic.light'
DISCOVER_SENSORS = 'homematic.sensor'
DISCOVER_BINARY_SENSORS = 'homematic.binary_sensor'
DISCOVER_COVER = 'homematic.cover'
DISCOVER_CLIMATE = 'homematic.climate'
ATTR_DISCOVER_DEVICES = 'devices'
ATTR_PARAM = 'param'
ATTR_CHANNEL = 'channel'
ATTR_NAME = 'name'
ATTR_ADDRESS = 'address'
ATTR_VALUE = 'value'
ATTR_INTERFACE = 'interface'
ATTR_ERRORCODE = 'error'
ATTR_MESSAGE = 'message'
ATTR_MODE = 'mode'
ATTR_TIME = 'time'
EVENT_KEYPRESS = 'homematic.keypress'
EVENT_IMPULSE = 'homematic.impulse'
EVENT_ERROR = 'homematic.error'
SERVICE_VIRTUALKEY = 'virtualkey'
SERVICE_RECONNECT = 'reconnect'
SERVICE_SET_VARIABLE_VALUE = 'set_variable_value'
SERVICE_SET_DEVICE_VALUE = 'set_device_value'
SERVICE_SET_INSTALL_MODE = 'set_install_mode'
HM_DEVICE_TYPES = {
DISCOVER_SWITCHES: [
'Switch', 'SwitchPowermeter', 'IOSwitch', 'IPSwitch', 'RFSiren',
'IPSwitchPowermeter', 'KeyMatic', 'HMWIOSwitch', 'Rain', 'EcoLogic'],
DISCOVER_LIGHTS: ['Dimmer', 'KeyDimmer', 'IPKeyDimmer'],
DISCOVER_SENSORS: [
'SwitchPowermeter', 'Motion', 'MotionV2', 'RemoteMotion', 'MotionIP',
'ThermostatWall', 'AreaThermostat', 'RotaryHandleSensor',
'WaterSensor', 'PowermeterGas', 'LuxSensor', 'WeatherSensor',
'WeatherStation', 'ThermostatWall2', 'TemperatureDiffSensor',
'TemperatureSensor', 'CO2Sensor', 'IPSwitchPowermeter', 'HMWIOSwitch',
'FillingLevel', 'ValveDrive', 'EcoLogic', 'IPThermostatWall',
'IPSmoke', 'RFSiren', 'PresenceIP'],
DISCOVER_CLIMATE: [
'Thermostat', 'ThermostatWall', 'MAXThermostat', 'ThermostatWall2',
'MAXWallThermostat', 'IPThermostat', 'IPThermostatWall',
'ThermostatGroup'],
DISCOVER_BINARY_SENSORS: [
'ShutterContact', 'Smoke', 'SmokeV2', 'Motion', 'MotionV2',
'MotionIP', 'RemoteMotion', 'WeatherSensor', 'TiltSensor',
'IPShutterContact', 'HMWIOSwitch', 'MaxShutterContact', 'Rain',
'WiredSensor', 'PresenceIP'],
DISCOVER_COVER: ['Blind', 'KeyBlind']
}
HM_IGNORE_DISCOVERY_NODE = [
'ACTUAL_TEMPERATURE',
'ACTUAL_HUMIDITY'
]
HM_ATTRIBUTE_SUPPORT = {
'LOWBAT': ['battery', {0: 'High', 1: 'Low'}],
'ERROR': ['sabotage', {0: 'No', 1: 'Yes'}],
'RSSI_DEVICE': ['rssi', {}],
'VALVE_STATE': ['valve', {}],
'BATTERY_STATE': ['battery', {}],
'CONTROL_MODE': ['mode', {
0: 'Auto',
1: 'Manual',
2: 'Away',
3: 'Boost',
4: 'Comfort',
5: 'Lowering'
}],
'POWER': ['power', {}],
'CURRENT': ['current', {}],
'VOLTAGE': ['voltage', {}],
'WORKING': ['working', {0: 'No', 1: 'Yes'}],
}
HM_PRESS_EVENTS = [
'PRESS_SHORT',
'PRESS_LONG',
'PRESS_CONT',
'PRESS_LONG_RELEASE',
'PRESS',
]
HM_IMPULSE_EVENTS = [
'SEQUENCE_OK',
]
CONF_RESOLVENAMES_OPTIONS = [
'metadata',
'json',
'xml',
False
]
DATA_HOMEMATIC = 'homematic'
DATA_STORE = 'homematic_store'
DATA_CONF = 'homematic_conf'
CONF_INTERFACES = 'interfaces'
CONF_LOCAL_IP = 'local_ip'
CONF_LOCAL_PORT = 'local_port'
CONF_PORT = 'port'
CONF_PATH = 'path'
CONF_CALLBACK_IP = 'callback_ip'
CONF_CALLBACK_PORT = 'callback_port'
CONF_RESOLVENAMES = 'resolvenames'
CONF_VARIABLES = 'variables'
CONF_DEVICES = 'devices'
CONF_PRIMARY = 'primary'
DEFAULT_LOCAL_IP = '0.0.0.0'
DEFAULT_LOCAL_PORT = 0
DEFAULT_RESOLVENAMES = False
DEFAULT_PORT = 2001
DEFAULT_PATH = ''
DEFAULT_USERNAME = 'Admin'
DEFAULT_PASSWORD = ''
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'homematic',
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_ADDRESS): cv.string,
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_CHANNEL, default=1): vol.Coerce(int),
vol.Optional(ATTR_PARAM): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_INTERFACES, default={}): {cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_RESOLVENAMES, default=DEFAULT_RESOLVENAMES):
vol.In(CONF_RESOLVENAMES_OPTIONS),
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_CALLBACK_IP): cv.string,
vol.Optional(CONF_CALLBACK_PORT): cv.port,
}},
vol.Optional(CONF_HOSTS, default={}): {cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
}},
vol.Optional(CONF_LOCAL_IP, default=DEFAULT_LOCAL_IP): cv.string,
vol.Optional(CONF_LOCAL_PORT, default=DEFAULT_LOCAL_PORT): cv.port,
}),
}, extra=vol.ALLOW_EXTRA)
SCHEMA_SERVICE_VIRTUALKEY = vol.Schema({
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): cv.string,
vol.Optional(ATTR_INTERFACE): cv.string,
})
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema({
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_INTERFACE): cv.string,
})
SCHEMA_SERVICE_RECONNECT = vol.Schema({})
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema({
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1):
vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
})
@bind_hass
def virtualkey(hass, address, channel, param, interface=None):
    """Fire a virtual keypress on the HomeMatic controller.

    Thin convenience wrapper that delegates to the
    ``homematic.virtualkey`` service.
    """
    hass.services.call(DOMAIN, SERVICE_VIRTUALKEY, {
        ATTR_ADDRESS: address,
        ATTR_CHANNEL: channel,
        ATTR_PARAM: param,
        ATTR_INTERFACE: interface,
    })
@bind_hass
def set_variable_value(hass, entity_id, value):
    """Change the value of a HomeMatic system variable.

    Thin convenience wrapper that delegates to the
    ``homematic.set_variable_value`` service.
    """
    hass.services.call(
        DOMAIN, SERVICE_SET_VARIABLE_VALUE,
        {ATTR_ENTITY_ID: entity_id, ATTR_VALUE: value})
@bind_hass
def set_device_value(hass, address, channel, param, value, interface=None):
    """Call the setValue XML-RPC method of the supplied interface.

    Thin convenience wrapper that delegates to the
    ``homematic.set_device_value`` service.
    """
    service_data = {ATTR_ADDRESS: address,
                    ATTR_CHANNEL: channel,
                    ATTR_PARAM: param,
                    ATTR_VALUE: value,
                    ATTR_INTERFACE: interface}
    hass.services.call(DOMAIN, SERVICE_SET_DEVICE_VALUE, service_data)
@bind_hass
def set_install_mode(hass, interface, mode=None, time=None, address=None):
    """Call the setInstallMode XML-RPC method of the supplied interface.

    Only truthy arguments are forwarded, so the service schema defaults
    apply for everything left out.
    """
    service_data = {}
    for key, value in ((ATTR_INTERFACE, interface), (ATTR_MODE, mode),
                       (ATTR_TIME, time), (ATTR_ADDRESS, address)):
        if value:
            service_data[key] = value
    hass.services.call(DOMAIN, SERVICE_SET_INSTALL_MODE, service_data)
@bind_hass
def reconnect(hass):
    """Reconnect to CCU/Homegear.

    Fires the ``homematic.reconnect`` service with an empty payload; the
    service handler delegates to pyhomematic's reconnect().
    """
    hass.services.call(DOMAIN, SERVICE_RECONNECT, {})
def setup(hass, config):
    """Set up the Homematic component.

    Builds the pyhomematic connection dictionary from the configured
    interfaces and hosts, starts the XML-RPC server thread, creates a hub
    entity per host and registers all ``homematic.*`` services.
    """
    from pyhomematic import HMConnection
    conf = config[DOMAIN]
    # remotes is shared: stored in hass.data AND handed to pyhomematic below.
    hass.data[DATA_CONF] = remotes = {}
    # DATA_STORE tracks device addresses already seen by discovery.
    hass.data[DATA_STORE] = set()
    # Create hosts-dictionary for pyhomematic: interfaces get full XML-RPC
    # event support ('connect': True) ...
    for rname, rconfig in conf[CONF_INTERFACES].items():
        remotes[rname] = {
            'ip': socket.gethostbyname(rconfig.get(CONF_HOST)),
            'port': rconfig.get(CONF_PORT),
            'path': rconfig.get(CONF_PATH),
            'resolvenames': rconfig.get(CONF_RESOLVENAMES),
            'username': rconfig.get(CONF_USERNAME),
            'password': rconfig.get(CONF_PASSWORD),
            'callbackip': rconfig.get(CONF_CALLBACK_IP),
            'callbackport': rconfig.get(CONF_CALLBACK_PORT),
            'connect': True,
        }
    # ... while plain hosts are polled only ('connect': False).
    for sname, sconfig in conf[CONF_HOSTS].items():
        remotes[sname] = {
            'ip': socket.gethostbyname(sconfig.get(CONF_HOST)),
            'port': DEFAULT_PORT,
            'username': sconfig.get(CONF_USERNAME),
            'password': sconfig.get(CONF_PASSWORD),
            'connect': False,
        }
    # Create server thread; the callback receives discovery/error events.
    bound_system_callback = partial(_system_callback_handler, hass, config)
    hass.data[DATA_HOMEMATIC] = homematic = HMConnection(
        local=config[DOMAIN].get(CONF_LOCAL_IP),
        localport=config[DOMAIN].get(CONF_LOCAL_PORT),
        remotes=remotes,
        systemcallback=bound_system_callback,
        interface_id='homeassistant'
    )
    # Start server thread, connect to hosts, initialize to receive events
    homematic.start()
    # Stops server when HASS is shutting down
    hass.bus.listen_once(
        EVENT_HOMEASSISTANT_STOP, hass.data[DATA_HOMEMATIC].stop)
    # Init homematic hubs: one HMHub entity per configured host.
    entity_hubs = []
    for hub_name in conf[CONF_HOSTS].keys():
        entity_hubs.append(HMHub(hass, homematic, hub_name))
    # Register HomeMatic services (descriptions come from services.yaml).
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))

    def _hm_service_virtualkey(service):
        """Handle homematic.virtualkey service calls (simulate keypress)."""
        address = service.data.get(ATTR_ADDRESS)
        channel = service.data.get(ATTR_CHANNEL)
        param = service.data.get(ATTR_PARAM)
        # Device not found
        hmdevice = _device_from_servicecall(hass, service)
        if hmdevice is None:
            _LOGGER.error("%s not found for service virtualkey!", address)
            return
        # Parameter doesn't exist for device
        if param not in hmdevice.ACTIONNODE:
            _LOGGER.error("%s not datapoint in hm device %s", param, address)
            return
        # Channel doesn't exist for device
        if channel not in hmdevice.ACTIONNODE[param]:
            _LOGGER.error("%i is not a channel in hm device %s",
                          channel, address)
            return
        # Call parameter
        hmdevice.actionNodeData(param, True, channel)
    hass.services.register(
        DOMAIN, SERVICE_VIRTUALKEY, _hm_service_virtualkey,
        descriptions[SERVICE_VIRTUALKEY], schema=SCHEMA_SERVICE_VIRTUALKEY)

    def _service_handle_value(service):
        """Handle homematic.set_variable_value: set a hub system variable."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        name = service.data[ATTR_NAME]
        value = service.data[ATTR_VALUE]
        # Without entity_ids the variable is set on every hub.
        if entity_ids:
            entities = [entity for entity in entity_hubs if
                        entity.entity_id in entity_ids]
        else:
            entities = entity_hubs
        if not entities:
            _LOGGER.error("No HomeMatic hubs available")
            return
        for hub in entities:
            hub.hm_set_variable(name, value)
    hass.services.register(
        DOMAIN, SERVICE_SET_VARIABLE_VALUE, _service_handle_value,
        descriptions[SERVICE_SET_VARIABLE_VALUE],
        schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE)

    def _service_handle_reconnect(service):
        """Handle homematic.reconnect: reconnect all HomeMatic hubs."""
        homematic.reconnect()
    hass.services.register(
        DOMAIN, SERVICE_RECONNECT, _service_handle_reconnect,
        descriptions[SERVICE_RECONNECT], schema=SCHEMA_SERVICE_RECONNECT)

    def _service_handle_device(service):
        """Handle homematic.set_device_value: setValue on a device channel."""
        address = service.data.get(ATTR_ADDRESS)
        channel = service.data.get(ATTR_CHANNEL)
        param = service.data.get(ATTR_PARAM)
        value = service.data.get(ATTR_VALUE)
        # Device not found
        hmdevice = _device_from_servicecall(hass, service)
        if hmdevice is None:
            _LOGGER.error("%s not found!", address)
            return
        hmdevice.setValue(param, value, channel)
    hass.services.register(
        DOMAIN, SERVICE_SET_DEVICE_VALUE, _service_handle_device,
        descriptions[SERVICE_SET_DEVICE_VALUE],
        schema=SCHEMA_SERVICE_SET_DEVICE_VALUE)

    def _service_handle_install_mode(service):
        """Handle homematic.set_install_mode: put an interface in pairing mode."""
        interface = service.data.get(ATTR_INTERFACE)
        mode = service.data.get(ATTR_MODE)
        time = service.data.get(ATTR_TIME)
        address = service.data.get(ATTR_ADDRESS)
        homematic.setInstallMode(interface, t=time, mode=mode, address=address)
    hass.services.register(
        DOMAIN, SERVICE_SET_INSTALL_MODE, _service_handle_install_mode,
        descriptions[SERVICE_SET_INSTALL_MODE],
        schema=SCHEMA_SERVICE_SET_INSTALL_MODE)
    return True
def _system_callback_handler(hass, config, src, *args):
    """System callback handler for pyhomematic.

    Dispatches on *src*: 'newDevices' registers event callbacks and triggers
    entity discovery for unseen addresses; 'error' re-fires Homegear errors
    on the HASS bus. Other callback types are ignored.
    """
    # New devices available at hub
    if src == 'newDevices':
        (interface_id, dev_descriptions) = args
        # interface_id looks like '<prefix>-<interface>'; keep the last part.
        interface = interface_id.split('-')[-1]
        # Device support active? Hosts with 'connect': False are poll-only.
        if not hass.data[DATA_CONF][interface]['connect']:
            return
        addresses = []
        for dev in dev_descriptions:
            # 'ADDRESS' may include a channel suffix ':<n>'; use the base part.
            address = dev['ADDRESS'].split(':')[0]
            if address not in hass.data[DATA_STORE]:
                hass.data[DATA_STORE].add(address)
                addresses.append(address)
        # Register EVENTS
        # Search all devices with an EVENTNODE that includes data
        bound_event_callback = partial(_hm_event_handler, hass, interface)
        for dev in addresses:
            hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(dev)
            if hmdevice.EVENTNODE:
                hmdevice.setEventCallback(
                    callback=bound_event_callback, bequeath=True)
        # Create HASS entities
        if addresses:
            for component_name, discovery_type in (
                    ('switch', DISCOVER_SWITCHES),
                    ('light', DISCOVER_LIGHTS),
                    ('cover', DISCOVER_COVER),
                    ('binary_sensor', DISCOVER_BINARY_SENSORS),
                    ('sensor', DISCOVER_SENSORS),
                    ('climate', DISCOVER_CLIMATE)):
                # Get all devices of a specific type
                found_devices = _get_devices(
                    hass, discovery_type, addresses, interface)
                # When devices of this type are found
                # they are setup in HASS and an discovery event is fired
                if found_devices:
                    discovery.load_platform(hass, component_name, DOMAIN, {
                        ATTR_DISCOVER_DEVICES: found_devices
                    }, config)
    # Homegear error message
    elif src == 'error':
        _LOGGER.error("Error: %s", args)
        (interface_id, errorcode, message) = args
        hass.bus.fire(EVENT_ERROR, {
            ATTR_ERRORCODE: errorcode,
            ATTR_MESSAGE: message
        })
def _get_devices(hass, discovery_type, keys, interface):
    """Get the HomeMatic devices for given discovery_type.

    Builds one schema-validated discovery dict per (device, parameter,
    channel) combination for the pyhomematic devices addressed by *keys*
    whose class belongs to *discovery_type*; returns the list of dicts.
    """
    device_arr = []
    for key in keys:
        device = hass.data[DATA_HOMEMATIC].devices[interface][key]
        class_name = device.__class__.__name__
        metadata = {}
        # Class not supported by discovery type
        if class_name not in HM_DEVICE_TYPES[discovery_type]:
            continue
        # Load metadata needed to generate a parameter list
        if discovery_type == DISCOVER_SENSORS:
            metadata.update(device.SENSORNODE)
        elif discovery_type == DISCOVER_BINARY_SENSORS:
            metadata.update(device.BINARYNODE)
        else:
            # Non-sensor platforms get a single parameterless entry.
            metadata.update({None: device.ELEMENT})
        # Generate options for 1...n elements with 1...n parameters
        for param, channels in metadata.items():
            if param in HM_IGNORE_DISCOVERY_NODE:
                continue
            # Add devices
            _LOGGER.debug("%s: Handling %s: %s: %s",
                          discovery_type, key, param, channels)
            for channel in channels:
                name = _create_ha_name(
                    name=device.NAME, channel=channel, param=param,
                    count=len(channels)
                )
                device_dict = {
                    CONF_PLATFORM: "homematic",
                    ATTR_ADDRESS: key,
                    ATTR_INTERFACE: interface,
                    ATTR_NAME: name,
                    ATTR_CHANNEL: channel
                }
                if param is not None:
                    device_dict[ATTR_PARAM] = param
                # Add new device only when it passes the platform schema.
                try:
                    DEVICE_SCHEMA(device_dict)
                    device_arr.append(device_dict)
                except vol.MultipleInvalid as err:
                    _LOGGER.error("Invalid device config: %s",
                                  str(err))
    return device_arr
def _create_ha_name(name, channel, param, count):
    """Generate a unique entity name from device metadata.

    The channel is appended when the device exposes more than one channel
    (*count* > 1), and the parameter is appended when one was given; a
    single-channel, parameterless device keeps its plain name.
    """
    if param is None:
        # Simple device, or one element per channel.
        if count == 1:
            return name
        if count > 1:
            return "{} {}".format(name, channel)
    else:
        # Parameterized datapoint, optionally on multiple channels.
        if count == 1:
            return "{} {}".format(name, param)
        if count > 1:
            return "{} {} {}".format(name, channel, param)
def _hm_event_handler(hass, interface, device, caller, attribute, value):
    """Handle all pyhomematic device events.

    Translates pyhomematic press/impulse events into HASS bus events
    (EVENT_KEYPRESS / EVENT_IMPULSE); anything else is logged and dropped.
    *device* is the channel address, e.g. 'ABC0000000:1'.
    """
    try:
        channel = int(device.split(":")[1])
        address = device.split(":")[0]
        hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(address)
    except (TypeError, ValueError):
        _LOGGER.error("Event handling channel convert error!")
        return
    # Return if not an event supported by device
    if attribute not in hmdevice.EVENTNODE:
        return
    _LOGGER.debug("Event %s for %s channel %i", attribute,
                  hmdevice.NAME, channel)
    # Keypress event
    if attribute in HM_PRESS_EVENTS:
        hass.bus.fire(EVENT_KEYPRESS, {
            ATTR_NAME: hmdevice.NAME,
            ATTR_PARAM: attribute,
            ATTR_CHANNEL: channel
        })
        return
    # Impulse event
    if attribute in HM_IMPULSE_EVENTS:
        hass.bus.fire(EVENT_IMPULSE, {
            ATTR_NAME: hmdevice.NAME,
            ATTR_CHANNEL: channel
        })
        return
    _LOGGER.warning("Event is unknown and not forwarded")
def _device_from_servicecall(hass, service):
    """Look up the pyhomematic device addressed by a service call.

    Searches a single interface when one was given, otherwise every known
    interface; returns None when no device matches.
    """
    address = service.data.get(ATTR_ADDRESS)
    # The service schema uppercases addresses, but pyhomematic stores the
    # BidCoS-RF virtual device in mixed case.
    if address == 'BIDCOS-RF':
        address = 'BidCoS-RF'
    device_registry = hass.data[DATA_HOMEMATIC].devices
    interface = service.data.get(ATTR_INTERFACE)
    if interface:
        return device_registry[interface].get(address)
    for interface_devices in device_registry.values():
        if address in interface_devices:
            return interface_devices[address]
    return None
class HMHub(Entity):
    """The HomeMatic hub. (CCU2/HomeGear).

    Entity state is the number of pending service messages; system
    variables are exposed as state attributes. Both are refreshed on
    their own time intervals rather than via entity polling.
    """

    def __init__(self, hass, homematic, name):
        """Initialize HomeMatic hub."""
        self.hass = hass
        self.entity_id = "{}.{}".format(DOMAIN, name.lower())
        self._homematic = homematic
        self._variables = {}
        self._name = name
        self._state = None
        # Load data: schedule periodic refreshes and run one refresh now.
        self.hass.helpers.event.track_time_interval(
            self._update_hub, SCAN_INTERVAL_HUB)
        self.hass.add_job(self._update_hub, None)
        self.hass.helpers.event.track_time_interval(
            self._update_variables, SCAN_INTERVAL_VARIABLES)
        self.hass.add_job(self._update_variables, None)

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def should_poll(self):
        """Return false. HomeMatic Hub object updates variables itself."""
        return False

    @property
    def state(self):
        """Return the state of the entity (service message count)."""
        return self._state

    @property
    def state_attributes(self):
        """Return the state attributes (system variable values)."""
        attr = self._variables.copy()
        return attr

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return "mdi:gradient"

    def _update_hub(self, now):
        """Retrieve latest service-message count from the hub."""
        service_message = self._homematic.getServiceMessages(self._name)
        state = None if service_message is None else len(service_message)
        # Only push an update when the count actually changed.
        if self._state != state:
            self._state = state
            self.schedule_update_ha_state()

    def _update_variables(self, now):
        """Retrieve all variable data and update hmvariable states."""
        variables = self._homematic.getAllSystemVariables(self._name)
        if variables is None:
            return
        state_change = False
        for key, value in variables.items():
            if key in self._variables and value == self._variables[key]:
                continue
            state_change = True
            self._variables.update({key: value})
        if state_change:
            self.schedule_update_ha_state()

    def hm_set_variable(self, name, value):
        """Set variable value on CCU/Homegear."""
        if name not in self._variables:
            _LOGGER.error("Variable %s not found on %s", name, self.name)
            return
        # Coerce to the type of the current value (bool or float).
        old_value = self._variables.get(name)
        if isinstance(old_value, bool):
            value = cv.boolean(value)
        else:
            value = float(value)
        self._homematic.setSystemVariable(self.name, name, value)
        self._variables.update({name: value})
        self.schedule_update_ha_state()
class HMDevice(Entity):
    """The HomeMatic device base object.

    Wraps one pyhomematic device/channel: mirrors the datapoints it cares
    about in self._data, subscribes to pyhomematic event callbacks and
    tracks availability via the device's UNREACH attribute.
    """

    def __init__(self, config):
        """Initialize a generic HomeMatic device from a discovery dict."""
        self._name = config.get(ATTR_NAME)
        self._address = config.get(ATTR_ADDRESS)
        self._interface = config.get(ATTR_INTERFACE)
        self._channel = config.get(ATTR_CHANNEL)
        self._state = config.get(ATTR_PARAM)  # name of the main datapoint
        self._data = {}
        self._homematic = None
        self._hmdevice = None
        self._connected = False
        self._available = False
        # Set parameter to uppercase (pyhomematic node names are uppercase).
        if self._state:
            self._state = self._state.upper()

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Load data and init callbacks once the entity is added."""
        yield from self.hass.async_add_job(self.link_homematic)

    @property
    def should_poll(self):
        """Return false. HomeMatic states are pushed by the XML-RPC Server."""
        return False

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def available(self):
        """Return true if device is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        attr = {}
        # Map supported pyhomematic attribute nodes to friendly names/values.
        for node, data in HM_ATTRIBUTE_SUPPORT.items():
            # Is an attribute and exists for this object
            if node in self._data:
                value = data[1].get(self._data[node], self._data[node])
                attr[data[0]] = value
        # Static attributes
        attr['id'] = self._hmdevice.ADDRESS
        attr['interface'] = self._interface
        return attr

    def link_homematic(self):
        """Connect this entity to its pyhomematic device object."""
        if self._connected:
            return True
        # Initialize
        self._homematic = self.hass.data[DATA_HOMEMATIC]
        self._hmdevice = \
            self._homematic.devices[self._interface][self._address]
        self._connected = True
        try:
            # Initialize datapoints of this object
            self._init_data()
            self._load_data_from_hm()
            # Link events from pyhomematic
            self._subscribe_homematic_events()
            # UNREACH truthy means the device is NOT reachable.
            self._available = not self._hmdevice.UNREACH
        # pylint: disable=broad-except
        except Exception as err:
            self._connected = False
            _LOGGER.error("Exception while linking %s: %s",
                          self._address, str(err))

    def _hm_event_callback(self, device, caller, attribute, value):
        """Handle all pyhomematic device events."""
        _LOGGER.debug("%s received event '%s' value: %s", self._name,
                      attribute, value)
        has_changed = False
        # Is data needed for this instance?
        if attribute in self._data:
            # Did data change?
            if self._data[attribute] != value:
                self._data[attribute] = value
                has_changed = True
        # Availability has changed. UNREACH truthy means unreachable, so
        # availability is its negation — matching link_homematic above.
        # (Previously this assigned bool(value) directly, marking devices
        # available exactly when they became unreachable.)
        if attribute == 'UNREACH':
            self._available = not bool(value)
            has_changed = True
        elif not self.available:
            # NOTE(review): this keeps _available False (a no-op write) but
            # still forces a state refresh while unavailable — presumably
            # intentional to surface incoming data; confirm before changing.
            self._available = False
            has_changed = True
        # If it has changed data point, update HASS
        if has_changed:
            self.schedule_update_ha_state()

    def _subscribe_homematic_events(self):
        """Subscribe all required events to handle job."""
        channels_to_sub = set()
        channels_to_sub.add(0)  # Add channel 0 for UNREACH
        # Push data to channels_to_sub from hmdevice metadata
        for metadata in (self._hmdevice.SENSORNODE, self._hmdevice.BINARYNODE,
                         self._hmdevice.ATTRIBUTENODE,
                         self._hmdevice.WRITENODE, self._hmdevice.EVENTNODE,
                         self._hmdevice.ACTIONNODE):
            for node, channels in metadata.items():
                # Data is needed for this instance
                if node in self._data:
                    # chan is current channel
                    if len(channels) == 1:
                        channel = channels[0]
                    else:
                        channel = self._channel
                    # Prepare for subscription
                    try:
                        channels_to_sub.add(int(channel))
                    except (ValueError, TypeError):
                        _LOGGER.error("Invalid channel in metadata from %s",
                                      self._name)
        # Set callbacks
        for channel in channels_to_sub:
            _LOGGER.debug(
                "Subscribe channel %d from %s", channel, self._name)
            self._hmdevice.setEventCallback(
                callback=self._hm_event_callback, bequeath=False,
                channel=channel)

    def _load_data_from_hm(self):
        """Load first value from pyhomematic."""
        if not self._connected:
            return False
        # Read data from pyhomematic
        for metadata, funct in (
                (self._hmdevice.ATTRIBUTENODE,
                 self._hmdevice.getAttributeData),
                (self._hmdevice.WRITENODE, self._hmdevice.getWriteData),
                (self._hmdevice.SENSORNODE, self._hmdevice.getSensorData),
                (self._hmdevice.BINARYNODE, self._hmdevice.getBinaryData)):
            for node in metadata:
                if metadata[node] and node in self._data:
                    self._data[node] = funct(name=node, channel=self._channel)
        return True

    def _hm_set_state(self, value):
        """Set data to main datapoint."""
        if self._state in self._data:
            self._data[self._state] = value

    def _hm_get_state(self):
        """Get data from main datapoint."""
        if self._state in self._data:
            return self._data[self._state]
        return None

    def _init_data(self):
        """Generate a data dict (self._data) from the HomeMatic metadata."""
        # Add all attributes to data dictionary
        for data_note in self._hmdevice.ATTRIBUTENODE:
            self._data.update({data_note: STATE_UNKNOWN})
        # Initialize device specific data
        self._init_data_struct()

    def _init_data_struct(self):
        """Generate a data dictionary from the HomeMatic device metadata."""
        raise NotImplementedError
| ewandor/home-assistant | homeassistant/components/homematic/__init__.py | Python | apache-2.0 | 29,766 |
from JumpScale import j
# from SpecModelactorsGenerator import SpecModelactorsGenerator
class Specbase(j.tools.code.classGetBase()):
    """Common base for all spec objects: shared metadata fields plus a
    helper that coerces textual default values to python types."""

    def __init__(self, linenr):
        self.linenr = linenr
        self.name = ""
        self.description = ""
        self.tags = ""
        self.comment = ""

    def addDefaults(self):
        """Hook for subclasses; the base spec has no implicit members."""
        pass

    def getDefaultValue(self, type, value):
        """Convert the textual *value* to the python type named by *type*.

        Empty or "None" strings map to None; surrounding double quotes are
        stripped; 'int' and 'bool' values are converted; everything else is
        returned as-is.
        """
        stripped = value.strip()
        if stripped in ("", "None"):
            return None
        if j.data.types.string.check(value):
            value = value.strip("\"")
        if value:
            if type == 'int':
                return int(value)
            if type == 'bool':
                return j.data.types.bool.fromString(value)
        return value
class SpecEnum(Specbase):
    """Specification of an [enumeration:...] block."""

    def __init__(self, name, specpath, linenr):
        Specbase.__init__(self, linenr)
        self.name = name
        self.specpath = specpath
        self.appname = ""
        self.enums = []
        self.actorname = ""

    def _parse(self, parser, content):
        """Collect one enum value per non-empty line of *content*."""
        self.enums.extend(
            line.strip() for line in content.split("\n") if line.strip())
class Specactor(Specbase):
    """Specification of an [actor:...] block: a named actor and its methods."""

    def __init__(self, name, descr, tags, specpath, linenr):
        Specbase.__init__(self, linenr)
        self.name = name
        self.description = descr
        self.tags = tags
        self.specpath = specpath
        self.appname = ""
        self.methods = []
        self.type = ""

    def _addItem(self, obj):
        """Register a parsed SpecactorMethod on this actor."""
        self.methods.append(obj)

    def addDefaults(self):
        """Actors add no implicit members."""
        pass
class SpecactorMethod(Specbase):
    """Specification of one actor method: its argument vars and result type."""

    def __init__(self, linenr):
        Specbase.__init__(self, linenr)
        self.vars = []
        self.result = None

    def _parseFirstLine(self, parser, line):
        """Take the method name (and tags/comment) from the header line."""
        self.comment, self.tags, line = parser.getTagsComment(line)
        self.name = line.strip()

    def _parse(self, parser, content):
        """Parse the method body: 'var:' argument lines and a 'result:' line.

        Any malformed line aborts parsing via parser.raiseError.
        """
        content = parser.deIndent(content, self.linenr)
        linenr = self.linenr
        for line in content.split("\n"):
            linenr += 1
            line0 = line  # keep the raw line for error reporting
            if line.strip() == "" or line.strip()[0] == "#":
                continue
            if line.find(":") != -1:
                comments, tags, line = parser.getTagsComment(line)
                errormsg = "Syntax error, right syntax var:$name $type,$defaultvalue,$description @tags #remarks"
                try:
                    varname, line = line.split(":", 1)
                except:
                    return parser.raiseError(errormsg, line0, linenr)
                if varname == "var":
                    # var:$name $type,$default,$description
                    if line.find(" ") == -1:
                        return parser.raiseError(errormsg, line0, linenr)
                    else:
                        varname, line = line.split(" ", 1)
                    try:
                        ttype, default, descr = line.split(",", 2)
                    except:
                        return parser.raiseError(errormsg, line0, linenr)
                    default = self.getDefaultValue(ttype, default)
                    spec = SpecactorMethodVar(
                        varname, descr, tags, linenr, default, ttype)
                    spec.comment = comments
                    self.vars.append(spec)
                elif varname == "result":
                    # result:$type
                    errormsg = "Syntax error, right syntax result:$type @tags #remarks"
                    if line.find(" ") == -1 and (line.find("@") != -1 or line.find("$") != -1):
                        return parser.raiseError(errormsg, line0, linenr)
                    if line.find(" ") == -1:
                        ttype = line
                    else:
                        ttype, line = line.split(" ", 1)
                    self.result = Specbase(linenr)
                    self.result.type = ttype
                    self.result.comment = comments
                else:
                    return parser.raiseError("Only var & result support on line, syntaxerror.", line0, linenr)
class SpecactorMethodVar(Specbase):
    """One parsed 'var:' argument line of an actor method."""

    def __init__(self, name, descr, tags, linenr, default, ttype):
        Specbase.__init__(self, linenr)
        self.name = name
        self.description = descr
        self.tags = tags
        # Default already coerced via Specbase.getDefaultValue by the caller.
        self.defaultvalue = default
        self.ttype = ttype
class SpecModel(Specbase):
    """Specification of a [model]/[rootmodel] block: a named set of properties."""

    def __init__(self, name, descr, tags, specpath, linenr):
        Specbase.__init__(self, linenr)
        self.name = name
        self.description = descr
        self.tags = tags
        self.specpath = specpath
        self.properties = []
        self.type = ""
        self.actorname = ""
        self.rootobject = False

    def _addItem(self, obj):
        """Register a parsed property spec on this model."""
        self.properties.append(obj)

    def exists(self, propname):
        """Return True when a property named *propname* is already defined."""
        return any(str(prop.name) == propname for prop in self.properties)

    def addDefaults(self):
        """Ensure every root model carries an auto-generated 'id' property."""
        if self.type != "rootmodel" or self.exists("id"):
            return
        id_prop = SpecModelProperty(0)
        id_prop.type = 'int'
        id_prop.name = 'id'
        id_prop.description = 'Auto generated id @optional'
        self._addItem(id_prop)
class SpecModelProperty(Specbase):
    """One parsed 'prop:' line of a model block."""

    def __init__(self, linenr):
        Specbase.__init__(self, linenr)
        self.default = None
        self.type = None

    def _parseFirstLine(self, parser, line):
        """Parse 'prop:$name $type,$defaultvalue,$description' (name stripped of 'prop:')."""
        errormsg = "Syntax error, right syntax prop:$name $type,$defaultvalue,$description @tags #remarks"
        line0 = "prop:%s" % line  # reconstruct the raw line for error reporting
        if line.find(" ") == -1:
            return parser.raiseError(errormsg, line0, self.linenr)
        else:
            self.name, line = line.split(" ", 1)
        try:
            self.type, self.default, self.description = line.split(",", 2)
            # Coerce the textual default to the declared type.
            self.default = self.getDefaultValue(self.type, self.default)
        except:
            return parser.raiseError(errormsg, line0, self.linenr)

    def _parse(self, parser, content):
        # Properties are fully described by their first line; no body to parse.
        pass
class SpecBlock:
"""
generic block of specs identified with starting with [...]
can be multiple types
"""
    def __init__(self, parser, line, linenr, appname, actorname):
        """Parse the '[type]' or '[type:name]' header line of a spec block.

        Strips tags/comments, validates the bracket syntax and records the
        lowercased block type and optional name; the block body is appended
        to self.content later by the caller.
        """
        self.appname = appname
        self.actorname = actorname
        self.descr = ""
        self.content = ""
        self.name = ""
        self.comment, self.tags, line = parser.getTagsComment(
            line)  # get @tags / #comment out of the header line
        # if line.find("@") != -1:
        # line=line.split("@")[0]
        # if line.find("#") != -1:
        # line=line.split("#")[0]
        line = line.replace("[", "")
        if line.find("]") == -1:
            return parser.raiseError("each [ on block should finish with ]", line, linenr)
        line = line.replace("]", "").strip()
        # Header is '$type' or '$type:$name'; both parts are normalized to
        # lowercase for case-insensitive matching.
        splitted = line.split(":")
        splitted = [item.strip().lower() for item in splitted]
        self.type = splitted[0]
        if len(splitted) == 1:
            self.name = ""
        elif len(splitted) == 2:
            self.name = splitted[1]
        else:
            return parser.raiseError(
                "each [...] on block need to be in format [$type:$name] or [$type], did not find :", line, linenr)
        self.parser = parser
        self.startline = linenr
        self.items = []
def parse(self):
self.content = self.parser.deIndent(self.content, self.startline)
if self.type == "actor":
ttypeId = "method"
spec = None
if len(list(j.core.specparser.specs.keys())) > 0 and self.type == "actor":
key = "%s_%s" % (self.appname, self.actorname)
if key in j.core.specparser.actornames:
spec = j.core.specparser.getactorSpec(
self.appname, self.actorname, False)
if spec is None:
spec = Specactor(self.name, self.descr, self.tags,
self.parser.path, self.startline)
spec.actorname = self.actorname
spec.appname = self.appname
if spec.appname not in j.core.specparser.app_actornames:
j.core.specparser.app_actornames[self.appname] = []
if spec.actorname not in j.core.specparser.app_actornames[self.appname]:
j.core.specparser.app_actornames[
self.appname].append(spec.actorname)
currentitemClass = SpecactorMethod
elif self.type == "enumeration":
ttypeId = "enumeration"
spec = SpecEnum(self.name, self.parser.path, self.startline)
spec.actorname = self.actorname
currentitemClass = None
elif self.type == "model" or self.type == "rootmodel":
ttypeId = "prop"
spec = SpecModel(self.name, self.descr, self.tags,
self.parser.path, self.startline)
spec.actorname = self.actorname
spec.appname = self.appname
spec.name = self.name
if self.type == "rootmodel":
spec.rootobject = True
# print "found model %s %s"%(self.name,self.parser.path)
# print self.content
# print "###########"
currentitemClass = SpecModelProperty
else:
return self.parser.raiseError(
"Invalid type '%s' could not find right type of spec doc, only supported model,actor,enum :" % self.type, self.content, self.startline)
# find the items in the block
linenr = self.startline
state = "start"
currentitemContent = ""
currentitem = None
if self.type == "enumeration":
currentitemContent = self.content
self.content = ""
currentitem = spec
for line in self.content.split("\n"):
linenr += 1
line = line.rstrip()
# print "line:%s state:%s" % (line,state)
if line.strip() == "":
if currentitem is not None and currentitemContent == "":
currentitem.linenr = linenr + 1
continue
if state == "description" and line.strip().find("\"\"\"") == 0:
# end of description
state = "blockfound"
currentitem.linenr = linenr + 1
continue
if state == "description":
currentitem.description += "%s\n" % line.strip()
if (state == "start" or state == "blockfound") and line.strip().find("\"\"\"") == 0:
# found description
state = "description"
continue
if state == "blockfound" and line.strip().find("@") == 0:
# found labels tags on right level
tmp1, currentitem.tags, tmp2 = self.parser.getTagsComment(line)
currentitem.linenr = linenr + 1
continue
if state == "blockfound" and line[0] == " ":
# we are in block & no block descr
currentitemContent += "%s\n" % line
if (state == "start" or state == "blockfound") and line[0] != " " and line.find(":") != -1:
typeOnLine = line.split(":", 1)[0].strip()
if ttypeId == typeOnLine:
state = "blockfound"
if currentitemContent != "":
currentitem._parse(self.parser, currentitemContent)
currentitemContent = ""
currentitem = currentitemClass(linenr)
comment, tags, line = self.parser.getTagsComment(line)
currentitem._parseFirstLine(
self.parser, line.split(":", 1)[1].strip())
if comment != "":
currentitem.comment = comment
if tags != "":
currentitem.tags = tags
spec._addItem(currentitem)
currentitemContent = ""
else:
self.parser.raiseError("Found item %s, only %s supported." % (
typeOnLine, ttypeId), line, linenr)
# are at end of file make sure last item is processed
if currentitemContent != "":
currentitem._parse(self.parser, currentitemContent)
# spec.appname=self.appname
# spec.actorname=self.actorname
spec.type = self.type
spec.addDefaults()
j.core.specparser.addSpec(spec)
def __str__(self):
s = "name:%s\n" % self.name
s += "type:%s\n" % self.type
s += "descr:%s\n" % self.descr
s += "tags:%s\n" % self.tags
s += "content:\n%s\n" % self.content
return s
__repr__ = __str__
class SpecDirParser:
    """Parse every ``*.spec`` file below a directory and merge their blocks.

    Blocks from later files are indexed by ``"$type_$name"`` in
    ``self.specblocks``; files named ``example__*`` are skipped.
    """

    def __init__(self, path, appname, actorname):
        self.appname = appname
        self.actorname = actorname
        self.path = path
        files = j.sal.fs.listFilesInDir(self.path, True, "*.spec")

        def sortFilesFollowingLength(files):
            # Order: files whose basename starts with actor/enum/model come
            # first (in that keyword order), then the rest sorted by path
            # length.
            r = {}
            result = []
            for item in ["actor", "enum", "model"]:
                # BUGFIX: the original popped from `files` while iterating it,
                # which silently skipped the element following every match.
                # Iterating a snapshot fixes that.
                for p in list(files):
                    pp = j.sal.fs.getBaseName(p)
                    if pp.find(item) == 0:
                        result.append(p)
                        files.remove(p)
            for p in files:
                if len(p) not in r:
                    r[len(p)] = []
                r[len(p)].append(p)
            lkeysSorted = sorted(r.keys())
            for lkey in lkeysSorted:
                result = result + r[lkey]
            return result

        files = sortFilesFollowingLength(files)
        self.specblocks = {}
        for path in files:
            if j.sal.fs.getBaseName(path).find("example__") == 0:
                continue
            parser = j.core.specparser._getSpecFileParser(
                path, self.appname, self.actorname)
            for key in list(parser.specblocks.keys()):
                block = parser.specblocks[key]
                self.specblocks[block.type + "_" + block.name] = block

    def getSpecBlock(self, type, name):
        """Return the block registered under ``$type_$name`` or False."""
        key = type + "_" + name
        if key in self.specblocks:
            return self.specblocks[key]
        else:
            return False

    def __str__(self):
        s = "path:%s\n" % self.path
        for key in list(self.specblocks.keys()):
            block = self.specblocks[key]
            s += "%s\n\n" % block
        return s

    __repr__ = __str__
class SpecFileParser:
    # Splits one .spec file into SpecBlock objects (one per "[...]" header)
    # and triggers their parsing.

    def __init__(self, path, appname, actorname):
        """
        find blocks in file
        """
        self.path = path
        self.appname = appname
        self.actorname = actorname
        if self.appname != self.appname.lower().strip():
            emsg = "appname %s for specs should be lowercase & no spaces" % self.appname
            raise j.exceptions.RuntimeError(
                emsg + " {category:spec.nameerror}")
        self.contentin = j.sal.fs.fileGetContents(path)
        self.contentout = ""
        self.specblocks = {}  # key is name
        state = "start"
        # a block starts with [...] and ends with next [] or end of file
        state = "start"
        linenr = 0
        currentblock = None
        # content=self.contentin+"\n***END***\n"
        for line in self.contentin.split("\n"):
            linenr += 1
            line = line.rstrip()
            # remove empty lines
            # NOTE(review): the replacement string looks collapsed in this
            # copy; presumably tabs expand to four spaces -- confirm.
            line = line.replace("\t", " ")
            if line.strip() == "" or line.strip()[0] == "#":
                if currentblock is not None and currentblock.content == "":
                    currentblock.startline = linenr + 1
                continue
            # remove comments from line
            # if line.find("#")>0:
            #     line=line.split("#",1)[0]
            if state == "blockfound" and line[0] == "[":
                # block ended; a new header closes the previous block.
                state = "start"
            if state == "blockdescription" and line.strip().find("\"\"\"") == 0:
                # end of description
                state = "blockfound"
                self.contentout += "%s\n" % line
                currentblock.startline = linenr + 2
                continue
            if state == "blockdescription":
                currentblock.descr += "%s\n" % line.strip()
            if state == "blockfound" and line.strip().find("\"\"\"") == 0 and currentblock.descr == "":
                # found description
                state = "blockdescription"
                self.contentout += "%s\n" % line
                continue
            # if state=="blockfound" and self._checkIdentation(line,linenr,1,1) and line.strip().find("@") != -1:
            # found labels tags on right level
            # if currentblock is not None:
            #     comments,currentblock.tags,tmp=self.getTagsComment(line)
            #     currentblock.startline=linenr
            # else:
            #     self.raiseError("Cannot find label & tags when there is no specblock opened [...]",line,linenr)
            #     self.contentout+="%s\n" % line
            # continue
            if state == "blockfound":
                # we are in block & no block descr
                currentblock.content += "%s\n" % line
            if state == "start" and line[0] == "[":
                state = "blockfound"
                # line2=line
                # if line2.find("#")>0:
                # from JumpScale.core.Shell import ipshellDebug,ipshell
                # print "DEBUG NOW jjj"
                # ipshell()
                # line2=line.split("#",1)[0]
                currentblock = SpecBlock(
                    self, line, linenr + 1, appname=self.appname, actorname=self.actorname)
                self.specblocks[currentblock.name] = currentblock
            self.contentout += "%s\n" % line
        # All blocks collected; now parse each one into registered specs.
        for key in list(self.specblocks.keys()):
            block = self.specblocks[key]
            block.parse()
            # print block.name

    def getTagsComment(self, line):
        """
        return comment,tags,line

        Splits trailing "#comment" and "@tags" markers off a line; either
        may be absent (comment defaults to "", tags to None).
        """
        if line.find("#") != -1:
            comment = line.split("#", 1)[1]
            line = line.split("#", 1)[0]
        else:
            comment = ""
        tags = None
        if line.find("@") != -1:
            tags = line.split("@", 1)[1]
            line = line.split("@", 1)[0]
        if comment.find("@") != -1:
            # tags may appear after the comment marker as well
            tags = comment.split("@", 1)[1]
            comment = comment.split("@")[0]
        if comment is not None:
            comment = comment.strip()
        if tags is not None:
            tags = tags.strip()
        return comment, tags, line

    def deIndent(self, content, startline):
        # remove garbage & fix identation
        # NOTE(review): the literals here look collapsed in this copy;
        # presumably the original checked for a four-space indent
        # ("    ") before stripping line[4:] -- confirm.
        content2 = ""
        linenr = startline
        for line in content.split("\n"):
            linenr += 1
            if line.strip() == "":
                continue
            else:
                if line.find(" ") != 0:
                    return self.raiseError("identation error.", line, linenr)
                content2 += "%s\n" % line[4:]
        return content2

    def _checkIdentation(self, line, linenr, minLevel=1, maxLevel=1):
        """
        Check that *line* is indented between minLevel and maxLevel
        four-space steps.  NOTE(review): currently only referenced from
        commented-out code above.
        """
        line = line.replace("\t", " ")
        ok = True
        if(len(line) < maxLevel * 4):
            self.raiseError(
                "line is too small, there should be max identation of %s" % maxLevel, line, linenr)
        for i in range(0, minLevel):
            if line[i * 4:(i + 1) * 4] != " ":
                ok = False
        if line[maxLevel * 4 + 1] == " ":
            ok = False
        return ok

    def raiseError(self, msg, line="", linenr=0):
        # Central error reporting; raiseInputError raises, so callers never
        # continue past this call.
        j.errorconditionhandler.raiseInputError("Cannot parse file %s\nError on line:%s\n%s\n%s\n" % (
            self.path, linenr, line, msg), "specparser.input")
class Role(j.tools.code.classGetBase()):
    """A named role grouping a set of actors."""

    def __init__(self, name, actors=None):
        # BUGFIX: the default used to be a mutable list (actors=[]) shared by
        # every Role instance created without an explicit actors argument.
        self.actors = [] if actors is None else actors
        self.name = name
class SpecParserFactory:
    """Registry and lookup service for parsed specs (actors, models, enums).

    Specs are keyed in ``self.specs`` as ``"$type_$appname_$actorname_$name"``;
    child models live in ``self.childspecs`` under a ``childmodel_`` prefix.
    """

    def __init__(self):
        self.__jslocation__ = "j.core.specparser"
        self.specs = {}
        self.childspecs = {}
        self.appnames = []
        self.actornames = []
        self.app_actornames = {}
        self.modelnames = {}  # key = appname_actorname
        self.roles = {}  # key is appname_rolename
        #self.codepath=j.sal.fs.joinPaths( j.dirs.varDir,"actorscode")

    def getEnumerationSpec(self, app, actorname, name, die=True):
        """Return the enumeration spec, or raise/return False when missing."""
        key = "enumeration_%s_%s_%s" % (app, actorname, name)
        if key in self.specs:
            return self.specs[key]
        else:
            if die:
                emsg = "Cannot find enumeration with name %s for app %s" % (
                    name, app)
                raise j.exceptions.RuntimeError(
                    emsg + " {category:specs.enumeration.notfound}")
            else:
                return False

    def getactorSpec(self, app, name, raiseError=True):
        """Return the actor spec, or raise/return None when missing."""
        key = "actor_%s_%s_%s" % (app, name, "")
        if key in self.specs:
            return self.specs[key]
        else:
            if raiseError:
                raise j.exceptions.RuntimeError("Cannot find actor with name %s for app %s" % (
                    name, app) + " {category:specs.actor.notfound}")
            else:
                return None

    def getModelSpec(self, app, actorname, name, die=True):
        """Return the (root)model spec, or raise/return False when missing."""
        key = "model_%s_%s_%s" % (app, actorname, name)
        key = key.lower()
        if key in self.specs:
            return self.specs[key]
        else:
            if die:
                emsg = "Cannot find model with name %s for app %s" % (
                    name, app)
                raise j.exceptions.RuntimeError(
                    emsg + " {category:specs.model.notfound}")
            else:
                return False

    def getChildModelSpec(self, app, actorname, name, die=True):
        """Return a non-root (child) model spec, or raise/return False."""
        key = "childmodel_%s_%s_%s" % (app, actorname, name)
        key = key.lower()
        if key in self.childspecs:
            return self.childspecs[key]
        else:
            if die:
                emsg = "Cannot find model with name %s for app %s" % (
                    name, app)
                raise j.exceptions.RuntimeError(
                    emsg + " {category:specs.model.notfound}")
            else:
                return False

    def getModelNames(self, appname, actorname):
        """Return the rootmodel names registered for an app/actor pair."""
        key = "%s_%s" % (appname, actorname)
        if key in j.core.specparser.modelnames:
            return self.modelnames[key]
        else:
            return []

    def addSpec(self, spec):
        """Validate and register *spec* in the factory's indexes."""
        if spec.name == spec.actorname:
            specname = ""
        else:
            specname = spec.name
        if spec.type == "rootmodel":
            # BUGFIX: collapsed a redundant nested "if spec.type ==
            # 'rootmodel'" check; behavior unchanged.  Rootmodels are stored
            # as plain models but additionally recorded in modelnames.
            spec.type = "model"
            key = "%s_%s" % (spec.appname, spec.actorname)
            if key not in self.modelnames:
                self.modelnames[key] = []
            if spec.name not in self.modelnames[key]:
                self.modelnames[key].append(spec.name)
        elif spec.type == "model":
            k = "%s_%s_%s_%s" % ("childmodel", spec.appname,
                                 spec.actorname, specname)
            self.childspecs[k] = spec
        if spec.type == "actor" and specname != "":
            print(
                "DEBUG NOW addSpec in specparser, cannot have actor with specname != empty")
        key = "%s_%s_%s_%s" % (spec.type, spec.appname,
                               spec.actorname, specname)
        if spec.type != spec.type.lower().strip():
            emsg = "type %s of spec %s should be lowercase & no spaces" % (
                spec.type, key)
            # TODO: P2 the errorcondition handler does not deal with this format to escalate categories
            raise j.exceptions.RuntimeError(emsg + " {category:specs.input}")
        if spec.name != spec.name.lower().strip():
            emsg = "name %s of spec %s should be lowercase & no spaces" % (
                spec.name, key)
            raise j.exceptions.RuntimeError(emsg + " {category:specs.input}")
        if spec.appname not in self.appnames:
            self.appnames.append(spec.appname)
        if spec.actorname == "":
            emsg = "actorname cannot be empty for spec:%s" % (spec.name)
            raise j.exceptions.RuntimeError(emsg + "\n{category:specs.input}")
        if "%s_%s" % (spec.appname, spec.actorname) not in self.actornames:
            self.actornames.append("%s_%s" % (spec.appname, spec.actorname))
        self.specs[key] = spec

    def findSpec(self, query="", appname="", actorname="", specname="", type="", findFromSpec=None, findOnlyOne=True):
        """
        do not specify query with one of the other filter criteria
        @param query is in dot notation e.g. $appname.$actorname.$modelname ... the items in front are optional
        """
        spec = findFromSpec
        if query != "":
            type = ""
            # A leading E/M marks enumeration/model queries.
            if query[0] == "E":
                type = "enumeration"
            if query[0] == "M":
                type = "model"
            if query.find("(") != -1 and query.find(")") != -1:
                query = query.split("(", 1)[1]
                query = query.split(")")[0]
            splitted = query.split(".")
            # see if we can find an appname
            appname = ""
            if len(splitted) > 1:
                possibleappname = splitted[0]
                if possibleappname in j.core.specparser.appnames:
                    appname = possibleappname
                    splitted = splitted[1:]  # remove the already matched item
            # see if we can find an actor
            actorname = ""
            if len(splitted) > 1:
                possibleactor = splitted[0]
                if possibleactor in j.core.specparser.actornames:
                    actorname = possibleactor
                    splitted = splitted[1:]  # remove the already matched item
            query = ".".join(splitted)
            if query.strip() != "." and query.strip() != "":
                specname = query
        if actorname == "" and spec is not None:
            # no specification of actor or app so needs to be local to
            # this spec
            actorname = spec.actorname
        if appname == "" and spec is not None:
            appname = spec.appname
        result = []
        if actorname == specname:
            specname = ""
        if actorname != "" and appname != "" and specname != "" and type != "":
            # Fully qualified: direct dict lookup.
            key = "%s_%s_%s_%s" % (type, appname, actorname, specname)
            if key in j.core.specparser.specs:
                result = [j.core.specparser.specs[key]]
        else:
            # not enough specified need to walk over all
            for key in list(j.core.specparser.specs.keys()):
                spec = j.core.specparser.specs[key]
                found = True
                if actorname != "" and spec.actorname != actorname:
                    found = False
                if appname != "" and spec.appname != appname:
                    found = False
                if specname != "" and spec.name != specname:
                    found = False
                if type != "" and spec.type != type:
                    found = False
                if found:
                    result.append(spec)
        if len(result) == 0:
            if spec is not None:
                emsg = "Could not find spec with query:%s appname:%s actorname:%s name:%s (spec info: '%s'_'%s'_'%s')" % \
                    (query, appname, actorname, specname,
                     spec.name, spec.specpath, spec.linenr)
            else:
                emsg = "Could not find spec with query:'%s' appname:'%s' actorname:'%s' name:'%s' " % \
                    (query, appname, actorname, specname)
            raise j.exceptions.RuntimeError(
                emsg + " {category:specs.finderror}")
        if findOnlyOne:
            if len(result) != 1:
                if spec is not None:
                    emsg = "Found more than 1 spec for search query:%s appname:%s actorname:%s name:%s (spec info: %s_%s_%s)" % \
                        (query, appname, actorname, specname,
                         spec.name, spec.specpath, spec.linenr)
                else:
                    emsg = "Found more than 1 spec for search query:%s appname:%s actorname:%s name:%s " % \
                        (query, appname, actorname, specname)
                raise j.exceptions.RuntimeError(
                    emsg + " {category:specs.finderror}")
            else:
                result = result[0]
        return result

    def _getSpecFileParser(self, path, appname, actorname):
        return SpecFileParser(path, appname, actorname)

    def init(self):
        # Reset the factory to a pristine state.
        self.__init__()

    def removeSpecsForactor(self, appname, actorname):
        """Drop all registered specs belonging to one app/actor pair."""
        appname = appname.lower()
        actorname = actorname.lower()
        if appname in self.appnames:
            i = self.appnames.index(appname)
            self.appnames.pop(i)
        key = "%s_%s" % (appname, actorname)
        if key in self.actornames:
            # found actor remove the specs
            for key2 in list(self.specs.keys()):
                type, app, item, remaining = key2.split("_", 3)
                if app == appname and item.find(actorname) == 0:
                    print(("remove specs %s from memory" % key))
                    self.specs.pop(key2)
            i = self.actornames.index(key)
            self.actornames.pop(i)

    def resetMemNonSystem(self):
        """Drop everything except specs belonging to the "system" app."""
        self.appnames = ["system"]
        for key2 in list(self.specs.keys()):
            type, app, item, remaining = key2.split("_", 3)
            if app != "system":
                self.specs.pop(key2)
        # BUGFIX: the original popped from self.actornames while iterating
        # it, which skipped the entry following every removal; iterate a
        # snapshot instead.
        for key in list(self.actornames):
            appname, actorname = key.split("_", 1)
            if appname != "system":
                self.actornames.remove(key)

    def parseSpecs(self, specpath, appname, actorname):
        """
        @param specpath if empty will look for path specs in current dir
        """
        if not j.sal.fs.exists(specpath):
            raise j.exceptions.RuntimeError(
                "Cannot find specs on path %s" % specpath)
        SpecDirParser(specpath, appname, actorname=actorname)
        # generate specs for model actors
        # smg=SpecModelactorsGenerator(appname,actorname,specpath)
        # smg.generate()
        # parse again to include the just generated specs
        # SpecDirParser(specpath,appname,actorname=actorname)

    def getSpecFromTypeStr(self, appname, actorname, typestr):
        """
        @param typestr e.g list(machine.status)
        @return $returntype,$spec $returntype=list,dict,object,enum (list & dict can be of primitive types or objects (NOT enums))
        """
        if typestr in ["int", "str", "float", "bool"]:
            return None, None
        elif typestr.find("list") == 0 or typestr.find("dict") == 0:
            if typestr.find("list") == 0:
                returntype = "list"
            else:
                returntype = "dict"
            # Strip the wrapper: "list(x)" -> "x".
            typestr = typestr.split("(")[1]
            typestr = typestr.split(")")[0]
        else:
            returntype = "object"
        if typestr in ["int", "str", "float", "bool", "list", "dict"]:
            spec = typestr
        else:
            result = self.getEnumerationSpec(
                appname, actorname, typestr, die=False)
            if result is False:
                result = self.getModelSpec(
                    appname, actorname, typestr, die=False)
                if result is False:
                    # NOTE(review): this "enum" returntype is only set on the
                    # path that raises below, so it never escapes -- looks
                    # like latent dead code; preserved as-is.
                    if returntype not in ["list", "dict"]:
                        returntype = "enum"
            if result is False:
                raise j.exceptions.RuntimeError(
                    "Cannot find spec for app:%s, actor:%s, with typestr:%s" % (appname, actorname, typestr))
            else:
                spec = result
        return returntype, spec
| Jumpscale/jumpscale_core8 | lib/JumpScale/core/specparser/SpecParser.py | Python | apache-2.0 | 32,720 |
# -*- coding: utf-8 -*-
"""
equip.analysis.dataflow.lattice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The base lattice implementation (mostly used as semi-lattice).
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
class Lattice(object):
    """
    Interface for a lattice element.  Practically, we only use the
    semi-lattice with the join (V) operator.

    Concrete analyses subclass this and implement ``join``/``meet``;
    ``join_all``/``meet_all`` fold those operators over several states.
    """

    def __init__(self):
        pass

    def init_state(self):
        """
        Returns a new initial state.
        """
        pass

    def join_all(self, *states):
        """Fold the join (V) operator over *states*, left to right."""
        accumulated = None
        for state in states:
            # The first non-initialized slot simply adopts the state.
            accumulated = state if accumulated is None else self.join(accumulated, state)
        return accumulated

    def join(self, state1, state2):
        """
        Returns the result of the V (supremum) between the two states.
        """
        pass

    def meet_all(self, *states):
        """Fold the meet operator over *states*, left to right."""
        accumulated = None
        for state in states:
            accumulated = state if accumulated is None else self.meet(accumulated, state)
        return accumulated

    def meet(self, state1, state2):
        """
        Returns the result of the meet \/ (infimum) between the two states.
        """
        pass

    def lte(self, state1, state2):
        """
        This is the <= operator between two lattice elements (states) as defined by:
            state1 <= state2 and state2 <= state1 <=> state1 == state2
        """
        pass

    def top(self):
        """
        The top of the lattice.
        """
        pass

    def bottom(self):
        """
        The bottom of the lattice.
        """
        pass
| neuroo/equip | equip/analysis/dataflow/lattice.py | Python | apache-2.0 | 1,624 |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import ast
import copy
import os
import re
from oslo.config import cfg
from oslo.serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from murano.openstack.common import fileutils
from murano.openstack.common._i18n import _, _LE, _LI
from murano.openstack.common import log as logging
# Configuration options understood by this policy module; registered on the
# global CONF object below so Enforcer can locate and load policy files.
policy_opts = [
    cfg.StrOpt('policy_file',
               default='policy.json',
               help=_('The JSON file that defines policies.')),
    cfg.StrOpt('policy_default_rule',
               default='default',
               help=_('Default rule. Enforced when a requested rule is not '
                      'found.')),
    cfg.MultiStrOpt('policy_dirs',
                    default=['policy.d'],
                    help=_('Directories where policy configuration files are '
                           'stored. They can be relative to any directory '
                           'in the search path defined by the config_dir '
                           'option, or absolute paths. The file defined by '
                           'policy_file must exist for these directories to '
                           'be searched.')),
]

CONF = cfg.CONF
CONF.register_opts(policy_opts)

# Module-level logger.
LOG = logging.getLogger(__name__)

# Registry of named check factories.  NOTE(review): population code is
# outside this chunk (presumably a register() helper) -- confirm.
_checks = {}
def list_opts():
    """Entry point for oslo.config-generator."""
    # Hand out a deep copy so callers cannot mutate the module's options.
    opts = copy.deepcopy(policy_opts)
    return [(None, opts)]
class PolicyNotAuthorized(Exception):
    """Raised when policy enforcement rejects the requested rule."""

    def __init__(self, rule):
        message = _("Policy doesn't allow %s to be performed.") % rule
        super(PolicyNotAuthorized, self).__init__(message)
class Rules(dict):
    """A store for rules. Handles the default_rule setting directly."""

    @classmethod
    def load_json(cls, data, default_rule=None):
        """Allow loading of JSON rule data."""
        # Suck in the JSON data and parse the rules
        # (parse_rule is defined elsewhere in this module).
        rules = dict((k, parse_rule(v)) for k, v in
                     jsonutils.loads(data).items())
        return cls(rules, default_rule)

    def __init__(self, rules=None, default_rule=None):
        """Initialize the Rules store."""
        super(Rules, self).__init__(rules or {})
        self.default_rule = default_rule

    def __missing__(self, key):
        """Implements the default rule handling.

        Called by dict on a missing key: falls back to default_rule,
        raising KeyError when no usable default exists.
        """
        if isinstance(self.default_rule, dict):
            raise KeyError(key)

        # If the default rule isn't actually defined, do something
        # reasonably intelligent
        if not self.default_rule:
            raise KeyError(key)

        if isinstance(self.default_rule, BaseCheck):
            return self.default_rule

        # We need to check this or we can get infinite recursion
        if self.default_rule not in self:
            raise KeyError(key)

        elif isinstance(self.default_rule, six.string_types):
            return self[self.default_rule]

    def __str__(self):
        """Dumps a string representation of the rules."""

        # Start by building the canonical strings for the rules
        out_rules = {}
        for key, value in self.items():
            # Use empty string for singleton TrueCheck instances
            if isinstance(value, TrueCheck):
                out_rules[key] = ''
            else:
                out_rules[key] = str(value)

        # Dump a pretty-printed JSON representation
        return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
    """Responsible for loading and enforcing rules.

    :param policy_file: Custom policy file to use, if none is
                        specified, `CONF.policy_file` will be
                        used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation. If
                  `load_rules(True)`, `clear()` or `set_rules(True)`
                  is called this will be overwritten.
    :param default_rule: Default rule to use, CONF.default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from cache or config file.
    :param overwrite: Whether to overwrite existing rules when reload rules
                      from config file.
    """

    def __init__(self, policy_file=None, rules=None,
                 default_rule=None, use_conf=True, overwrite=True):
        self.default_rule = default_rule or CONF.policy_default_rule
        self.rules = Rules(rules, self.default_rule)

        self.policy_path = None
        self.policy_file = policy_file or CONF.policy_file
        self.use_conf = use_conf
        self.overwrite = overwrite

    def set_rules(self, rules, overwrite=True, use_conf=False):
        """Create a new Rules object based on the provided dict of rules.

        :param rules: New rules to use. It should be an instance of dict.
        :param overwrite: Whether to overwrite current rules or update them
                          with the new rules.
        :param use_conf: Whether to reload rules from cache or config file.
        """
        if not isinstance(rules, dict):
            raise TypeError(_("Rules must be an instance of dict or Rules, "
                              "got %s instead") % type(rules))
        self.use_conf = use_conf
        if overwrite:
            self.rules = Rules(rules, self.default_rule)
        else:
            self.rules.update(rules)

    def clear(self):
        """Clears Enforcer rules, policy's cache and policy's path."""
        self.set_rules({})
        fileutils.delete_cached_file(self.policy_path)
        self.default_rule = None
        self.policy_path = None

    def load_rules(self, force_reload=False):
        """Loads policy_path's rules.

        Policy file is cached and will be reloaded if modified.

        :param force_reload: Whether to reload rules from config file.
        """
        if force_reload:
            self.use_conf = force_reload

        if self.use_conf:
            if not self.policy_path:
                self.policy_path = self._get_policy_path(self.policy_file)

            self._load_policy_file(self.policy_path, force_reload,
                                   overwrite=self.overwrite)
            # Also merge rules from any existing policy.d-style directories.
            for path in CONF.policy_dirs:
                try:
                    path = self._get_policy_path(path)
                except cfg.ConfigFilesNotFoundError:
                    LOG.info(_LI("Can not find policy directory: %s"), path)
                    continue
                self._walk_through_policy_directory(path,
                                                    self._load_policy_file,
                                                    force_reload, False)

    @staticmethod
    def _walk_through_policy_directory(path, func, *args):
        # We do not iterate over sub-directories.
        policy_files = next(os.walk(path))[2]
        policy_files.sort()
        # Hidden files (dotfiles) are skipped.
        for policy_file in [p for p in policy_files if not p.startswith('.')]:
            func(os.path.join(path, policy_file), *args)

    def _load_policy_file(self, path, force_reload, overwrite=True):
        # Re-parse only when the cached file changed (or rules are empty,
        # or we are merging instead of overwriting).
        reloaded, data = fileutils.read_cached_file(
            path, force_reload=force_reload)
        if reloaded or not self.rules or not overwrite:
            rules = Rules.load_json(data, self.default_rule)
            self.set_rules(rules, overwrite=overwrite, use_conf=True)
            LOG.debug("Rules successfully reloaded")

    def _get_policy_path(self, path):
        """Locate the policy json data file/path.

        :param path: It's value can be a full path or related path. When
                     full path specified, this function just returns the full
                     path. When related path specified, this function will
                     search configuration directories to find one that exists.

        :returns: The policy path

        :raises: ConfigFilesNotFoundError if the file/path couldn't
                 be located.
        """
        policy_path = CONF.find_file(path)

        if policy_path:
            return policy_path

        raise cfg.ConfigFilesNotFoundError((path,))

    def enforce(self, rule, target, creds, do_raise=False,
                exc=None, *args, **kwargs):
        """Checks authorization of a rule against the target and credentials.

        :param rule: A string or BaseCheck instance specifying the rule
                    to evaluate.
        :param target: As much information about the object being operated
                    on as possible, as a dictionary.
        :param creds: As much information about the user performing the
                    action as possible, as a dictionary.
        :param do_raise: Whether to raise an exception or not if check
                        fails.
        :param exc: Class of the exception to raise if the check fails.
                    Any remaining arguments passed to enforce() (both
                    positional and keyword arguments) will be passed to
                    the exception class. If not specified, PolicyNotAuthorized
                    will be used.

        :return: Returns False if the policy does not allow the action and
                exc is not provided; otherwise, returns a value that
                evaluates to True.  Note: for rules using the "case"
                expression, this True value will be the specified string
                from the expression.
        """

        self.load_rules()

        # Allow the rule to be a Check tree
        if isinstance(rule, BaseCheck):
            result = rule(target, creds, self)
        elif not self.rules:
            # No rules to reference means we're going to fail closed
            result = False
        else:
            try:
                # Evaluate the rule
                result = self.rules[rule](target, creds, self)
            except KeyError:
                LOG.debug("Rule [%s] doesn't exist" % rule)
                # If the rule doesn't exist, fail closed
                result = False

        # If it is False, raise the exception if requested
        if do_raise and not result:
            if exc:
                raise exc(*args, **kwargs)

            raise PolicyNotAuthorized(rule)

        return result
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
    """Abstract base class for Check classes.

    Subclasses are callables: Enforcer.enforce() invokes them with
    (target, cred, enforcer) to evaluate one node of a rule tree.
    """

    @abc.abstractmethod
    def __str__(self):
        """String representation of the Check tree rooted at this node."""

        pass

    @abc.abstractmethod
    def __call__(self, target, cred, enforcer):
        """Triggers if instance of the class is called.

        Performs the check. Returns False to reject the access or a
        true value (not necessary True) to accept the access.
        """

        pass
class FalseCheck(BaseCheck):
    """A policy check that always returns False (disallow).

    Spelled "!" in the policy language.
    """

    def __str__(self):
        """Return the canonical "!" representation."""
        return "!"

    def __call__(self, target, cred, enforcer):
        """Reject the access unconditionally."""
        return False
class TrueCheck(BaseCheck):
    """A policy check that always returns True (allow).

    Spelled "@" in the policy language.
    """

    def __str__(self):
        """Return the canonical "@" representation."""
        return "@"

    def __call__(self, target, cred, enforcer):
        """Accept the access unconditionally."""
        return True
class Check(BaseCheck):
    """A base class to allow for user-defined policy checks."""

    def __init__(self, kind, match):
        """Store both halves of a "kind:match" policy atom.

        :param kind: The kind of the check, i.e., the field before the ':'.
        :param match: The match of the check, i.e., the field after the ':'.
        """
        self.kind = kind
        self.match = match

    def __str__(self):
        """Return the "kind:match" string form of this check."""
        return "{0}:{1}".format(self.kind, self.match)
class NotCheck(BaseCheck):
    """Logical negation of a wrapped check."""
    def __init__(self, rule):
        """Wrap *rule*, whose result will be inverted.

        :param rule: the Check to negate
        """
        self.rule = rule
    def __str__(self):
        """Render as ``not <rule>``."""
        return "not {0}".format(self.rule)
    def __call__(self, target, cred, enforcer):
        """Return the boolean inverse of the wrapped check's result."""
        inner = self.rule(target, cred, enforcer)
        return not inner
class AndCheck(BaseCheck):
    """Conjunction of several checks: every sub-check must accept."""
    def __init__(self, rules):
        """Store the initial list of sub-checks.

        :param rules: list of Check objects AND-ed together
        """
        self.rules = rules
    def __str__(self):
        """Render as ``(a and b and ...)``."""
        return "({0})".format(" and ".join(map(str, self.rules)))
    def __call__(self, target, cred, enforcer):
        """Accept only if every sub-check accepts (short-circuits)."""
        return all(rule(target, cred, enforcer) for rule in self.rules)
    def add_check(self, rule):
        """Append one more sub-check.

        Returns the AndCheck itself so calls can be chained.
        """
        self.rules.append(rule)
        return self
class OrCheck(BaseCheck):
    """Disjunction of several checks: at least one must accept."""
    def __init__(self, rules):
        """Store the initial list of sub-checks.

        :param rules: list of Check objects OR-ed together
        """
        self.rules = rules
    def __str__(self):
        """Render as ``(a or b or ...)``."""
        return "({0})".format(" or ".join(map(str, self.rules)))
    def __call__(self, target, cred, enforcer):
        """Accept if any sub-check accepts (short-circuits)."""
        return any(rule(target, cred, enforcer) for rule in self.rules)
    def add_check(self, rule):
        """Append one more sub-check.

        Returns the OrCheck itself so calls can be chained.
        """
        self.rules.append(rule)
        return self
def _parse_check(rule):
    """Convert one base check string into the matching Check instance.

    Unknown or malformed rules fail closed (FalseCheck).
    """
    # The two single-character special rules.
    specials = {'!': FalseCheck, '@': TrueCheck}
    if rule in specials:
        return specials[rule]()
    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_LE("Failed to understand rule %s") % rule)
        # Malformed rule: fail closed.
        return FalseCheck()
    # Dispatch to the registered handler; the None kind is the catch-all.
    handler = _checks.get(kind, _checks.get(None))
    if handler is None:
        LOG.error(_LE("No handler for matches of kind %s") % kind)
        return FalseCheck()
    return handler(kind, match)
def _parse_list_rule(rule):
    """Translate the legacy list-of-lists policy syntax into Checks.

    The outer list is joined by "or", each inner list by "and".
    Provided for backwards compatibility.
    """
    # An empty rule always accepts.
    if not rule:
        return TrueCheck()
    or_terms = []
    for inner in rule:
        # Empty inner lists are elided entirely.
        if not inner:
            continue
        # A bare string is treated as a one-element inner list.
        if isinstance(inner, six.string_types):
            inner = [inner]
        checks = [_parse_check(part) for part in inner]
        # Collapse single-check conjunctions.
        or_terms.append(checks[0] if len(checks) == 1 else AndCheck(checks))
    # Collapse degenerate disjunctions as well.
    if not or_terms:
        return FalseCheck()
    if len(or_terms) == 1:
        return or_terms[0]
    return OrCheck(or_terms)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
    """Metaclass that aggregates reducer methods into a class-level table.

    Any attribute carrying a ``reducers`` list (attached by the
    ``reducer`` decorator) contributes (token_sequence, method_name)
    entries to the injected ``reducers`` class attribute.
    """
    def __new__(mcs, name, bases, cls_dict):
        """Create the class, injecting the aggregated 'reducers' list."""
        table = [(sequence, attr_name)
                 for attr_name, member in cls_dict.items()
                 if hasattr(member, 'reducers')
                 for sequence in member.reducers]
        cls_dict['reducers'] = table
        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
    """Decorator marking a method as the reduction for a token sequence.

    The sequence is recorded on the function's ``reducers`` attribute,
    which ParseStateMeta later collects into a class-level table.
    """
    def decorator(func):
        # Lazily create the per-function list of trigger sequences.
        sequences = getattr(func, 'reducers', None)
        if sequences is None:
            sequences = func.reducers = []
        sequences.append(list(tokens))
        return func
    return decorator
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
    """Implement the core of parsing the policy language.

    Uses a greedy reduction algorithm to reduce a sequence of tokens into
    a single terminal, the value of which will be the root of the Check
    tree.  The ``reducers`` table consulted below is injected by
    ParseStateMeta from the @reducer-decorated methods.

    Note: error reporting is rather lacking. The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """
    def __init__(self):
        """Initialize the ParseState with empty token/value stacks."""
        self.tokens = []
        self.values = []
    def reduce(self):
        """Perform a greedy reduction of the token stream.

        If a reducer method matches the tail of the token stack, it is
        executed, then reduce() recurses to search for further
        reductions.
        """
        for reduction, methname in self.reducers:
            # Match the reducer's token sequence against the stack tail.
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)
                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])
                # Update the tokens and values
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]
                # Check for any more reductions
                return self.reduce()
    def shift(self, tok, value):
        """Adds one more token to the state. Calls reduce()."""
        self.tokens.append(tok)
        self.values.append(value)
        # Do a greedy reduce...
        self.reduce()
    @property
    def result(self):
        """Obtain the final result of the parse.

        Raises ValueError if the parse failed to reduce to a single result.
        """
        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]
    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""
        return [('check', check)]
    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """Create an 'and_expr'.

        Join two checks by the 'and' operator.
        """
        return [('and_expr', AndCheck([check1, check2]))]
    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """Extend an 'and_expr' by adding one more check."""
        return [('and_expr', and_expr.add_check(check))]
    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """Create an 'or_expr'.

        Join two checks by the 'or' operator.
        """
        return [('or_expr', OrCheck([check1, check2]))]
    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """Extend an 'or_expr' by adding one more check."""
        return [('or_expr', or_expr.add_check(check))]
    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""
        return [('check', NotCheck(check))]
def _parse_text_rule(rule):
    """Translate a policy-language string into a tree of Check objects.

    An empty rule accepts everything; an unparseable rule fails closed.
    """
    if not rule:
        return TrueCheck()
    state = ParseState()
    # Shift each token into the parser; reductions happen greedily.
    for token, value in _parse_tokenize(rule):
        state.shift(token, value)
    try:
        return state.result
    except ValueError:
        LOG.exception(_LE("Failed to understand rule %s") % rule)
        # Parse failure: deny access rather than guessing.
        return FalseCheck()
def parse_rule(rule):
    """Parses a policy rule into a tree of Check objects.

    String rules use the policy language; anything else is assumed to
    be the legacy list-of-lists syntax.
    """
    # If the rule is a string, it's in the policy language
    if isinstance(rule, six.string_types):
        return _parse_text_rule(rule)
    return _parse_list_rule(rule)
def register(name, func=None):
    """Register a function or Check class as a policy check.

    :param name: the check kind (the part before ':'); None registers
                 the default, catch-all handler.
    :param func: the callable/class to register.  When omitted, a
                 decorator is returned instead, allowing use as
                 ``@register("kind")``.
    """
    def decorator(func):
        # Record the handler and hand it back unchanged so this is
        # usable directly as a decorator.
        _checks[name] = func
        return func
    # Support both direct-call and decorator usage.
    return decorator(func) if func else decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response
        is exactly 'True'.
        """
        # self.match is a URL template filled in from the target dict.
        url = ('http:' + self.match) % target
        # Convert instances of object() in target temporarily to
        # empty dict to avoid circular reference detection
        # errors in jsonutils.dumps().
        temp_target = copy.deepcopy(target)
        for key in target.keys():
            element = target.get(key)
            if type(element) is object:
                temp_target[key] = {}
        # POST both the target and the credentials as JSON form fields.
        data = {'target': jsonutils.dumps(temp_target),
                'credentials': jsonutils.dumps(creds)}
        post_data = urlparse.urlencode(data)
        f = urlrequest.urlopen(url, post_data)
        # Only the literal body "True" grants access.
        # NOTE(review): on Python 3 read() returns bytes, so this
        # comparison would always be False — confirm intended runtime.
        return f.read() == "True"
@register(None)
class GenericCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check an individual match.

        Matches look like:
            tenant:%(tenant_id)s
            role:compute:admin
            True:%(user.enabled)s
            'Member':%(role.name)s
        """
        try:
            # Expand the right-hand side template from the target dict.
            match = self.match % target
        except KeyError:
            # While doing GenericCheck if key not
            # present in Target return false
            return False
        try:
            # Try to interpret self.kind as a literal
            # NOTE(review): ast.literal_eval can also raise SyntaxError,
            # which is not caught here and would propagate to the caller.
            leftval = ast.literal_eval(self.kind)
        except ValueError:
            try:
                # Otherwise treat self.kind as a dotted path into creds.
                kind_parts = self.kind.split('.')
                leftval = creds
                for kind_part in kind_parts:
                    leftval = leftval[kind_part]
            except KeyError:
                return False
        # Compare as text so e.g. True matches "True".
        return match == six.text_type(leftval)
| telefonicaid/murano | murano/openstack/common/policy.py | Python | apache-2.0 | 29,785 |
# -*- coding: utf-8 -*-
from model.group import Group
def _assert_group_added(app, group):
    """Create *group* through the app and verify the list grew by exactly it.

    Shared by both add-group tests below; the original copies of this
    logic were duplicated verbatim.
    """
    old_groups = app.group.get_group_list()
    app.group.create(group)
    new_groups = app.group.get_group_list()
    # Cheap length check first for a clearer failure message.
    assert len(old_groups) + 1 == len(new_groups)
    old_groups.append(group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
def test_add_group(app):
    """Adding a fully populated group must append it to the group list."""
    _assert_group_added(app, Group(name="hjhj", header="jhjh", footer="jhjjhhj"))
def test_add_empty_group(app):
    """Adding a group with all-empty fields must still be accepted."""
    _assert_group_added(app, Group(name="", header="", footer=""))
| alenasf/Pythontest | test/test_add_group.py | Python | apache-2.0 | 813 |
import pytest
import salt.states.openvswitch_port as openvswitch_port
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    """Provide minimal loader config: test mode off for the state module."""
    return {openvswitch_port: {"__opts__": {"test": False}}}
def test_present():
    """
    Test to verify that the named port exists on bridge, eventually creates it.

    Covers three scenarios: the port already exists, a plain port is
    created, and a GRE tunnel interface is created.
    """
    name = "salt"
    bridge = "br-salt"
    ret = {"name": name, "result": None, "comment": "", "changes": {}}
    mock = MagicMock(return_value=True)
    mock_l = MagicMock(return_value=["salt"])
    mock_n = MagicMock(return_value=[])
    # Scenario 1: the port is already listed on the bridge -> no changes.
    with patch.dict(
        openvswitch_port.__salt__,
        {
            "openvswitch.bridge_exists": mock,
            "openvswitch.interface_get_type": MagicMock(return_value='""'),
            "openvswitch.port_list": mock_l,
        },
    ):
        comt = "Port salt already exists."
        ret.update({"comment": comt, "result": True})
        assert openvswitch_port.present(name, bridge) == ret
    # Scenario 2: the port is missing -> it gets created on the bridge.
    with patch.dict(
        openvswitch_port.__salt__,
        {
            "openvswitch.bridge_exists": mock,
            "openvswitch.interface_get_type": MagicMock(return_value='""'),
            "openvswitch.port_list": mock_n,
            "openvswitch.port_add": mock,
        },
    ):
        comt = "Port salt created on bridge br-salt."
        ret.update(
            {
                "comment": comt,
                "result": True,
                "changes": {
                    "salt": {
                        "new": "Created port salt on bridge br-salt.",
                        "old": "No port named salt present.",
                    },
                },
            }
        )
        assert openvswitch_port.present(name, bridge) == ret
    # Scenario 3: tunnel_type="gre" -> a GRE tunnel interface is created.
    with patch.dict(
        openvswitch_port.__salt__,
        {
            "openvswitch.bridge_exists": mock,
            "openvswitch.port_list": mock_n,
            "openvswitch.port_add": mock,
            "openvswitch.interface_get_options": mock_n,
            "openvswitch.interface_get_type": MagicMock(return_value=""),
            "openvswitch.port_create_gre": mock,
            "dig.check_ip": mock,
        },
    ):
        comt = "Port salt created on bridge br-salt."
        ret.update(
            {
                "result": True,
                "comment": (
                    "Created GRE tunnel interface salt with remote ip 10.0.0.1 and key"
                    " 1 on bridge br-salt."
                ),
                "changes": {
                    "salt": {
                        "new": (
                            "Created GRE tunnel interface salt with remote ip 10.0.0.1"
                            " and key 1 on bridge br-salt."
                        ),
                        "old": (
                            "No GRE tunnel interface salt with remote ip 10.0.0.1 and"
                            " key 1 on bridge br-salt present."
                        ),
                    },
                },
            }
        )
        assert (
            openvswitch_port.present(
                name, bridge, tunnel_type="gre", id=1, remote="10.0.0.1"
            )
            == ret
        )
| saltstack/salt | tests/pytests/unit/states/test_openvswitch_port.py | Python | apache-2.0 | 3,213 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
import urllib2
import json
import sys
import logging
import datetime
from datetime import datetime
import time
import re
logger = logging.getLogger("sensei_components")
#
# REST API parameter constants
#
PARAM_OFFSET = "start"
PARAM_COUNT = "rows"
PARAM_QUERY = "q"
PARAM_QUERY_PARAM = "qparam"
PARAM_SORT = "sort"
PARAM_SORT_ASC = "asc"
PARAM_SORT_DESC = "desc"
PARAM_SORT_SCORE = "relevance"
PARAM_SORT_SCORE_REVERSE = "relrev"
PARAM_SORT_DOC = "doc"
PARAM_SORT_DOC_REVERSE = "docrev"
PARAM_FETCH_STORED = "fetchstored"
PARAM_SHOW_EXPLAIN = "showexplain"
PARAM_ROUTE_PARAM = "routeparam"
PARAM_GROUP_BY = "groupby"
PARAM_MAX_PER_GROUP = "maxpergroup"
PARAM_SELECT = "select"
PARAM_SELECT_VAL = "val"
PARAM_SELECT_NOT = "not"
PARAM_SELECT_OP = "op"
PARAM_SELECT_OP_AND = "and"
PARAM_SELECT_OP_OR = "or"
PARAM_SELECT_PROP = "prop"
PARAM_FACET = "facet"
PARAM_DYNAMIC_INIT = "dyn"
PARAM_PARTITIONS = "partitions"
PARAM_FACET_EXPAND = "expand"
PARAM_FACET_MAX = "max"
PARAM_FACET_MINHIT = "minhit"
PARAM_FACET_ORDER = "order"
PARAM_FACET_ORDER_HITS = "hits"
PARAM_FACET_ORDER_VAL = "val"
PARAM_DYNAMIC_TYPE = "type"
PARAM_DYNAMIC_TYPE_STRING = "string"
PARAM_DYNAMIC_TYPE_BYTEARRAY = "bytearray"
PARAM_DYNAMIC_TYPE_BOOL = "boolean"
PARAM_DYNAMIC_TYPE_INT = "int"
PARAM_DYNAMIC_TYPE_LONG = "long"
PARAM_DYNAMIC_TYPE_DOUBLE = "double"
PARAM_DYNAMIC_VAL = "vals"
# Keys appearing in REST API result payloads.
PARAM_RESULT_PARSEDQUERY = "parsedquery"
PARAM_RESULT_HIT_STORED_FIELDS = "stored"
PARAM_RESULT_HIT_STORED_FIELDS_NAME = "name"
PARAM_RESULT_HIT_STORED_FIELDS_VALUE = "val"
PARAM_RESULT_HIT_EXPLANATION = "explanation"
PARAM_RESULT_FACETS = "facets"
PARAM_RESULT_TID = "tid"
PARAM_RESULT_TOTALDOCS = "totaldocs"
PARAM_RESULT_NUMHITS = "numhits"
PARAM_RESULT_HITS = "hits"
PARAM_RESULT_HIT_UID = "uid"
PARAM_RESULT_HIT_DOCID = "docid"
PARAM_RESULT_HIT_SCORE = "score"
PARAM_RESULT_HIT_SRC_DATA = "srcdata"
PARAM_RESULT_TIME = "time"
PARAM_RESULT_SELECT_LIST = "select_list"
# Keys used by the sysinfo endpoint.
PARAM_SYSINFO_NUMDOCS = "numdocs"
PARAM_SYSINFO_LASTMODIFIED = "lastmodified"
PARAM_SYSINFO_VERSION = "version"
PARAM_SYSINFO_FACETS = "facets"
PARAM_SYSINFO_FACETS_NAME = "name"
PARAM_SYSINFO_FACETS_RUNTIME = "runtime"
PARAM_SYSINFO_FACETS_PROPS = "props"
PARAM_SYSINFO_CLUSTERINFO = "clusterinfo"
PARAM_SYSINFO_CLUSTERINFO_ID = "id"
PARAM_SYSINFO_CLUSTERINFO_PARTITIONS = "partitions"
PARAM_SYSINFO_CLUSTERINFO_NODELINK = "nodelink"
PARAM_SYSINFO_CLUSTERINFO_ADMINLINK = "adminlink"
PARAM_RESULT_HITS_EXPL_VALUE = "value"
PARAM_RESULT_HITS_EXPL_DESC = "description"
PARAM_RESULT_HITS_EXPL_DETAILS = "details"
PARAM_RESULT_FACET_INFO_VALUE = "value"
PARAM_RESULT_FACET_INFO_COUNT = "count"
PARAM_RESULT_FACET_INFO_SELECTED = "selected"
#
# JSON API parameter constants
#
JSON_PARAM_COLUMNS = "columns"
JSON_PARAM_EXPLAIN = "explain"
JSON_PARAM_FACETS = "facets"
JSON_PARAM_FACET_INIT = "facetInit"
JSON_PARAM_FETCH_STORED = "fetchStored"
JSON_PARAM_FETCH_TERM_VECTORS = "fetchTermVectors"
JSON_PARAM_FILTER = "filter"
JSON_PARAM_FROM = "from"
JSON_PARAM_GROUPBY = "groupBy"
JSON_PARAM_PARTITIONS = "partitions"
JSON_PARAM_QUERY = "query"
JSON_PARAM_QUERY_STRING = "query_string"
JSON_PARAM_ROUTEPARAM = "routeParam"
JSON_PARAM_SELECTIONS = "selections"
JSON_PARAM_SIZE = "size"
JSON_PARAM_SORT = "sort"
JSON_PARAM_TOP = "top"
JSON_PARAM_VALUES = "values"
JSON_PARAM_EXCLUDES = "excludes"
JSON_PARAM_OPERATOR = "operator"
JSON_PARAM_NO_OPTIMIZE = "_noOptimize"
# Group by related column names
GROUP_VALUE = "groupvalue"
GROUP_HITS = "grouphits"
# Default constants used when a request omits the corresponding field.
DEFAULT_REQUEST_OFFSET = 0
DEFAULT_REQUEST_COUNT = 10
DEFAULT_REQUEST_MAX_PER_GROUP = 10
DEFAULT_FACET_MINHIT = 1
DEFAULT_FACET_MAXHIT = 10
DEFAULT_FACET_ORDER = PARAM_FACET_ORDER_HITS
#
# Utilities for result display
#
def print_line(keys, max_lens, char='-', sep_char='+'):
    """Write one horizontal table rule to stdout.

    Each column is filled with *char* to max_lens[key] plus two padding
    positions, separated (and framed) by *sep_char*.
    """
    line = sep_char + ''.join(char * (max_lens[key] + 2) + sep_char
                              for key in keys)
    sys.stdout.write(line + '\n')
def print_header(keys, max_lens, char='-', sep_char='+'):
    """Write a table header row framed by two horizontal rules."""
    print_line(keys, max_lens, char=char, sep_char=sep_char)
    cells = ''.join(' %s%s |' % (key, ' ' * (max_lens[key] - len(key)))
                    for key in keys)
    sys.stdout.write('|' + cells + '\n')
    print_line(keys, max_lens, char=char, sep_char=sep_char)
def print_footer(keys, max_lens, char='-', sep_char='+'):
    # A footer is just a closing horizontal rule, identical to print_line.
    print_line(keys, max_lens, char=char, sep_char=sep_char)
def safe_str(obj):
    """Return the byte string representation of obj.

    NOTE(review): the fallback relies on the Python 2 ``unicode``
    builtin; on Python 3 it would raise NameError (and str() never
    raises UnicodeEncodeError there).
    """
    try:
        return str(obj)
    except UnicodeEncodeError:
        # obj is unicode
        return unicode(obj).encode("unicode_escape")
class SenseiClientError(Exception):
    """Raised for any error originating from the Sensei client layer."""
    def __init__(self, value):
        """Remember the offending value/message for later display."""
        self.value = value
    def __str__(self):
        """Show the stored value in repr form for unambiguous output."""
        return "%r" % (self.value,)
class SenseiFacet:
    """Per-facet request options: expansion, hit bounds, and ordering."""
    def __init__(self,expand=False,minHits=1,maxCounts=10,orderBy=PARAM_FACET_ORDER_HITS):
        self.expand = expand
        self.minHits = minHits
        self.maxCounts = maxCounts
        # orderBy is one of PARAM_FACET_ORDER_HITS / PARAM_FACET_ORDER_VAL.
        self.orderBy = orderBy
class SenseiSelections:
    """Container pairing a selection type tag with its spec dict."""
    def __init__(self, type):
        self.type = type;
        self.selection = {}
    def get_type(self):
        """The selection type tag supplied at construction."""
        return self.type
    def get_selection(self):
        """The (mutable) selection spec dictionary."""
        return self.selection
class SenseiQuery:
    """Base class for JSON query objects; subclasses fill self.query."""
    def __init__(self, type):
        self.type = type
        self.query = {}
    def get_type(self):
        """The query type tag (e.g. "term", "ids", "query_string")."""
        return self.type
    def get_query(self):
        """The JSON-serializable query dictionary."""
        return self.query
class SenseiQueryMatchAll(SenseiQuery):
    """Query that matches every document, with an adjustable boost."""
    def __init__(self):
        SenseiQuery.__init__(self, "match_all")
        self.query = {"match_all": {"boost": 1.0}}
    def set_boost(self, boost):
        """Overwrite the boost carried by the match_all clause."""
        self.query["match_all"]["boost"] = boost
class SenseiQueryIDs(SenseiQuery):
    """Query selecting documents by explicit UID lists.

    ``values`` are UIDs to match and ``excludes`` UIDs to reject; both
    are only installed when given as lists.

    Fixes: replaced ``dict.has_key()`` (removed in Python 3; ``in``
    works on Python 2 as well) and no longer appends duplicate UIDs
    occurring *within* one add_values/add_excludes call.
    """
    def __init__(self, values, excludes):
        SenseiQuery.__init__(self, "ids")
        self.query = {"ids": {"values": [], "excludes": [], "boost": 1.0}}
        if isinstance(values, list) and isinstance(excludes, list):
            self.query = {"ids": {"values": values, "excludes": excludes, "boost": 1.0}}
    def add_values(self, values):
        """Append UIDs to the match list, skipping duplicates."""
        if "ids" in self.query:
            spec = self.query["ids"]
            if "values" in spec:
                current = spec["values"]
                seen = set(current)
                for uid in values:
                    if uid not in seen:
                        current.append(uid)
                        seen.add(uid)
    def add_excludes(self, excludes):
        """Append UIDs to the exclusion list, skipping duplicates."""
        if "ids" in self.query:
            spec = self.query["ids"]
            if "excludes" in spec:
                current = spec["excludes"]
                seen = set(current)
                for uid in excludes:
                    if uid not in seen:
                        current.append(uid)
                        seen.add(uid)
    def set_boost(self, boost):
        """Set the boost on the ids clause."""
        self.query["ids"]["boost"] = boost
class SenseiQueryString(SenseiQuery):
    """Lucene-style query_string query with its full option set.

    The constructor installs every option at its default; the setters
    below overwrite individual options in place.
    """
    def __init__(self, query):
        SenseiQuery.__init__(self, "query_string")
        self.query={"query_string":{"query":query,
                                    "default_field":"contents",
                                    "default_operator":"OR",
                                    "allow_leading_wildcard":True,
                                    "lowercase_expanded_terms":True,
                                    "enable_position_increments":True,
                                    "fuzzy_prefix_length":0,
                                    "fuzzy_min_sim":0.5,
                                    "phrase_slop":0,
                                    "boost":1.0,
                                    "auto_generate_phrase_queries":False,
                                    "fields":[],
                                    "use_dis_max":True,
                                    "tie_breaker":0
                                    }}
    def set_field(self, field):
        """Field searched when a clause names none explicitly."""
        self.query["query_string"]["default_field"]=field
    def set_operator(self, operator):
        """Default boolean operator ("AND"/"OR") between clauses."""
        self.query["query_string"]["default_operator"]=operator
    def set_allow_leading_wildcard(self, allow_leading_wildcard):
        self.query["query_string"]["allow_leading_wildcard"]=allow_leading_wildcard
    def set_lowercase_expanded_terms(self, lowercase_expanded_terms):
        self.query["query_string"]["lowercase_expanded_terms"]=lowercase_expanded_terms
    def set_enable_position_increments(self, enable_position_increments):
        self.query["query_string"]["enable_position_increments"]=enable_position_increments
    def set_fuzzy_prefix_length(self, fuzzy_prefix_length):
        self.query["query_string"]["fuzzy_prefix_length"]=fuzzy_prefix_length
    def set_fuzzy_min_sim(self, fuzzy_min_sim):
        self.query["query_string"]["fuzzy_min_sim"]=fuzzy_min_sim
    def set_phrase_slop(self, phrase_slop):
        self.query["query_string"]["phrase_slop"]=phrase_slop
    def set_boost(self, boost):
        self.query["query_string"]["boost"]=boost
    def set_auto_generate_phrase_queries(self, auto_generate_phrase_queries):
        self.query["query_string"]["auto_generate_phrase_queries"]=auto_generate_phrase_queries
    def set_fields(self, fields):
        # Only a list is accepted; anything else is silently ignored.
        if isinstance(fields, list):
            self.query["query_string"]["fields"]=fields
    def set_use_dis_max(self, use_dis_max):
        self.query["query_string"]["use_dis_max"]=use_dis_max
    def set_tie_breaker(self, tie_breaker):
        self.query["query_string"]["tie_breaker"]=tie_breaker
class SenseiQueryText(SenseiQuery):
    """Text query with message, operator, and match type."""
    def __init__(self, message, operator, type):
        SenseiQuery.__init__(self, "text")
        self.query={"text":{"message":message, "operator":operator, "type":type}}
class SenseiQueryTerm(SenseiQuery):
    """Exact term query on a single column."""
    def __init__(self, column, value):
        SenseiQuery.__init__(self, "term")
        self.query = {"term": {column: {"value": value, "boost": 1.0}}}
    def set_boost(self, boost):
        """Apply *boost* to every column clause of this term query.

        Bug fix: the original called ``dict.iterms()``, which does not
        exist, so set_boost always raised AttributeError.
        """
        target = self.query["term"]
        for column, desc in target.items():
            desc["boost"] = boost
class SenseiFilter:
    """Base class for JSON filter objects; subclasses fill self.filter."""
    def __init__(self, type):
        self.type = type
        self.filter = {}
    def get_type(self):
        """The filter type tag (e.g. "term", "range", "and")."""
        return self.type
    def get_filter(self):
        """The JSON-serializable filter dictionary."""
        return self.filter
class SenseiFilterIDs(SenseiFilter):
    """Filter selecting documents by explicit UID lists.

    Fixes: replaced ``dict.has_key()`` (removed in Python 3; ``in``
    works on Python 2 as well) and no longer appends duplicate UIDs
    occurring *within* one add_values/add_excludes call.
    """
    def __init__(self, values, excludes):
        SenseiFilter.__init__(self, "ids")
        self.filter = {"ids": {"values": [], "excludes": []}}
        if isinstance(values, list) and isinstance(excludes, list):
            self.filter = {"ids": {"values": values, "excludes": excludes}}
    def add_values(self, values):
        """Append UIDs to the match list, skipping duplicates."""
        if "ids" in self.filter:
            spec = self.filter["ids"]
            if "values" in spec:
                current = spec["values"]
                seen = set(current)
                for uid in values:
                    if uid not in seen:
                        current.append(uid)
                        seen.add(uid)
    def add_excludes(self, excludes):
        """Append UIDs to the exclusion list, skipping duplicates."""
        if "ids" in self.filter:
            spec = self.filter["ids"]
            if "excludes" in spec:
                current = spec["excludes"]
                seen = set(current)
                for uid in excludes:
                    if uid not in seen:
                        current.append(uid)
                        seen.add(uid)
class SenseiFilterBool(SenseiFilter):
    """Boolean combination filter with must / must_not / should slots.

    Bug fix: the original stored the SenseiFilter wrapper *objects* in
    the clause slots; every other combinator (SenseiFilterAND/OR)
    stores the underlying dicts via get_filter(), and only dicts are
    JSON-serializable.  The slots now hold the filter dicts.
    """
    def __init__(self, must_filter=None, must_not_filter=None, should_filter=None):
        SenseiFilter.__init__(self, "bool")
        self.filter = {"bool": {"must": {}, "must_not": {}, "should": {}}}
        clauses = self.filter["bool"]
        # isinstance() already rejects None, so no separate None check.
        if isinstance(must_filter, SenseiFilter):
            clauses["must"] = must_filter.get_filter()
        if isinstance(must_not_filter, SenseiFilter):
            clauses["must_not"] = must_not_filter.get_filter()
        if isinstance(should_filter, SenseiFilter):
            clauses["should"] = should_filter.get_filter()
class SenseiFilterAND(SenseiFilter):
    """Conjunction filter: wraps the sub-filters' dicts under "and"."""
    def __init__(self, filter_list):
        """Collect get_filter() of each SenseiFilter in *filter_list*.

        Non-list input or non-SenseiFilter entries are ignored.
        """
        SenseiFilter.__init__(self, "and")
        self.filter = {"and": []}
        if isinstance(filter_list, list):
            self.filter["and"] = [f.get_filter() for f in filter_list
                                  if isinstance(f, SenseiFilter)]
class SenseiFilterOR(SenseiFilter):
    """Disjunction filter: wraps the sub-filters' dicts under "or"."""
    def __init__(self, filter_list):
        """Collect get_filter() of each SenseiFilter in *filter_list*.

        Non-list input or non-SenseiFilter entries are ignored.
        """
        SenseiFilter.__init__(self, "or")
        self.filter = {"or": []}
        if isinstance(filter_list, list):
            self.filter["or"] = [f.get_filter() for f in filter_list
                                 if isinstance(f, SenseiFilter)]
class SenseiFilterTerm(SenseiFilter):
    """Exact term filter on a single column, with a no-optimize hint."""
    def __init__(self, column, value, noOptimize=False):
        SenseiFilter.__init__(self, "term")
        self.filter={"term":{column:{"value": value, "_noOptimize":noOptimize}}}
class SenseiFilterTerms(SenseiFilter):
    """Multi-term filter on one column, in simple or extended form.

    With only *values* given, the short form ``{column: values}`` is
    produced.  When *excludes* is also a list, the extended form with
    operator and _noOptimize is produced instead.  If *values* is not a
    list, the filter stays an empty ``{"terms": {}}``.
    """
    def __init__(self, column, values=None, excludes=None, operator="or", noOptimize=False):
        SenseiFilter.__init__(self, "terms")
        self.filter={"terms":{}}
        if values is not None and isinstance(values, list):
            if excludes is not None and isinstance(excludes, list):
                # complicated mode
                self.filter={"terms":{column:{"values":values, "excludes":excludes, "operator":operator, "_noOptimize":noOptimize}}}
            else:
                self.filter={"terms":{column:values}}
class SenseiFilterRange(SenseiFilter):
    """Range filter on a single column, with optional no-optimize hints."""
    def __init__(self, column, from_val, to_val):
        SenseiFilter.__init__(self, "range")
        self.filter = {"range": {column: {"from": from_val, "to": to_val, "_noOptimize": False}}}
    def set_No_optimization(self, type, date_format=None):
        """Mark every column spec as non-optimizable with an explicit type.

        :param type: value type hint (e.g. "date")
        :param date_format: format string, attached only when type is "date"

        Fix: the local variable holding the range dict was named
        ``range``, shadowing the builtin; renamed to ``range_spec``.
        """
        range_spec = self.filter["range"]
        for key, value in range_spec.items():
            if value is not None:
                value["_type"] = type
                value["_noOptimize"] = True
                if type == "date" and date_format is not None:
                    value["_date_format"] = date_format
class SenseiFilterQuery(SenseiFilter):
    """Filter wrapping a SenseiQuery; non-query input yields an empty spec."""
    def __init__(self, query):
        SenseiFilter.__init__(self, "query")
        self.filter={"query":{}}
        if isinstance(query, SenseiQuery):
            self.filter={"query": query.get_query()}
class SenseiFilterSelection(SenseiFilter):
    """Filter wrapping a SenseiSelections; other input yields an empty spec."""
    def __init__(self, selection):
        SenseiFilter.__init__(self, "selection")
        self.filter = {"selection":{}}
        if isinstance(selection, SenseiSelections):
            self.filter={"selection":selection.get_selection()}
class SenseiSelection:
    """Facet selection: included/excluded values plus properties.

    NOTE(review): RANGE_REGEX, SELECTION_TYPE_RANGE and
    SELECTION_TYPE_SIMPLE are defined elsewhere in this module, and
    ``basestring`` is Python 2 only.
    """
    def __init__(self, field, operation=PARAM_SELECT_OP_OR):
        self.field = field
        self.operation = operation
        # Set lazily by the first addSelection(); all later values must
        # be of the same type (simple vs range).
        self.type = None
        self.values = []
        self.excludes = []
        self.properties = {}
    def __str__(self):
        return ("Selection:%s:%s:%s:%s" %
                (self.field, self.operation,
                 ','.join(self.values), ','.join(self.excludes)))
    def _get_type(self, value):
        """Classify *value* as a range or a simple selection."""
        if isinstance(value, basestring) and RANGE_REGEX.match(value):
            return SELECTION_TYPE_RANGE
        else:
            return SELECTION_TYPE_SIMPLE
    def addSelection(self, value, isNot=False):
        """Add *value* (to excludes when isNot); enforces type consistency."""
        val_type = self._get_type(value)
        if not self.type:
            self.type = val_type
        elif self.type != val_type:
            raise SenseiClientError("Value (%s) type mismatch for facet %s: "
                                    % (value, self.field))
        if isNot:
            self.excludes.append(safe_str(value))
        else:
            self.values.append(safe_str(value))
    def removeSelection(self, value, isNot=False):
        """Remove *value* from excludes (isNot) or values; raises if absent."""
        if isNot:
            self.excludes.remove(safe_str(value))
        else:
            self.values.remove(safe_str(value))
    def addProperty(self, name, value):
        self.properties[name] = value
    def removeProperty(self, name):
        # Raises KeyError if the property was never added.
        del self.properties[name]
    def getValues(self):
        return self.values
    def setValues(self, values):
        """Replace all included values, revalidating each via addSelection."""
        self.values = []
        if len(values) > 0:
            for value in values:
                self.addSelection(value)
    def getExcludes(self):
        return self.excludes
    def setExcludes(self, excludes):
        """Replace all excluded values, revalidating each via addSelection."""
        self.excludes = []
        if len(excludes) > 0:
            for value in excludes:
                self.addSelection(value, True)
    def getType(self):
        return self.type
    def setType(self, val_type):
        self.type = val_type
    def getSelectNotParam(self):
        """REST parameter name for the exclusion list."""
        return "%s.%s.%s" % (PARAM_SELECT, self.field, PARAM_SELECT_NOT)
    def getSelectNotParamValues(self):
        return ",".join(self.excludes)
    def getSelectOpParam(self):
        """REST parameter name for the boolean operator."""
        return "%s.%s.%s" % (PARAM_SELECT, self.field, PARAM_SELECT_OP)
    def getSelectValParam(self):
        """REST parameter name for the inclusion list."""
        return "%s.%s.%s" % (PARAM_SELECT, self.field, PARAM_SELECT_VAL)
    def getSelectValParamValues(self):
        return ",".join(self.values)
    def getSelectPropParam(self):
        """REST parameter name for the property map."""
        return "%s.%s.%s" % (PARAM_SELECT, self.field, PARAM_SELECT_PROP)
    def getSelectPropParamValues(self):
        # Serialized as "key1:val1,key2:val2"; values must be strings.
        return ",".join(key + ":" + self.properties.get(key)
                        for key in self.properties.keys())
class SenseiSort:
    """Sort specification for one field (or a special pseudo-field)."""
    def __init__(self, field, reverse=False):
        """Record the sort field; special pseudo-fields carry no direction."""
        self.field = field
        self.dir = None
        special = (PARAM_SORT_SCORE, PARAM_SORT_SCORE_REVERSE,
                   PARAM_SORT_DOC, PARAM_SORT_DOC_REVERSE)
        if field not in special:
            self.dir = PARAM_SORT_DESC if reverse else PARAM_SORT_ASC
    def __str__(self):
        return self.build_sort_field()
    def build_sort_field(self):
        """Render as "field" or "field:dir" for the REST API."""
        return self.field + ":" + self.dir if self.dir else self.field
    def build_sort_spec(self):
        """Render the JSON API form: {field: dir}, "_score", or bare field."""
        if self.dir:
            return {self.field: self.dir}
        if self.field == PARAM_SORT_SCORE:
            return "_score"
        return self.field
class SenseiFacetInitParams:
    """FacetHandler initialization parameters.

    One map per primitive type; every stored value is normalized to a
    list so getters always return a list (or None when absent).  The
    twelve copy-pasted put_* bodies were collapsed into one helper.
    """
    def __init__(self):
        self.bool_map = {}
        self.int_map = {}
        self.long_map = {}
        self.string_map = {}
        self.byte_map = {}
        self.double_map = {}
    @staticmethod
    def _store(param_map, key, value):
        # Normalize scalars to single-element lists (shared by all put_*).
        param_map[key] = value if isinstance(value, list) else [value]
    # Getters for param names for different types
    def get_bool_param_names(self):
        return self.bool_map.keys()
    def get_int_param_names(self):
        return self.int_map.keys()
    def get_long_param_names(self):
        return self.long_map.keys()
    def get_string_param_names(self):
        return self.string_map.keys()
    def get_byte_param_names(self):
        return self.byte_map.keys()
    def get_double_param_names(self):
        return self.double_map.keys()
    # Add param name, values (scalars are wrapped into lists)
    def put_bool_param(self, key, value):
        self._store(self.bool_map, key, value)
    def put_int_param(self, key, value):
        self._store(self.int_map, key, value)
    def put_long_param(self, key, value):
        self._store(self.long_map, key, value)
    def put_string_param(self, key, value):
        self._store(self.string_map, key, value)
    def put_byte_param(self, key, value):
        self._store(self.byte_map, key, value)
    def put_double_param(self, key, value):
        self._store(self.double_map, key, value)
    # Getters of param value(s) based on param names
    def get_bool_param(self, key):
        return self.bool_map.get(key)
    def get_int_param(self, key):
        return self.int_map.get(key)
    def get_long_param(self, key):
        return self.long_map.get(key)
    def get_string_param(self, key):
        return self.string_map.get(key)
    def get_byte_param(self, key):
        return self.byte_map.get(key)
    def get_double_param(self, key):
        return self.double_map.get(key)
class SenseiFacetInfo:
    """Metadata describing a single facet handler (name, runtime flag, props)."""

    def __init__(self, name, runtime=False, props=None):
        # Bug fix: the original used a mutable default ``props={}``, so every
        # instance created without an explicit props argument shared ONE dict;
        # mutating one instance's props silently mutated all of them.
        self.name = name
        self.runtime = runtime
        self.props = props if props is not None else {}

    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name

    def get_runtime(self):
        return self.runtime

    def set_runtime(self, runtime):
        self.runtime = runtime

    def get_props(self):
        return self.props

    def set_props(self, props):
        self.props = props
class SenseiNodeInfo:
    """Immutable description of one Sensei cluster node."""

    def __init__(self, id, partitions, node_link, admin_link):
        self.id = id
        self.partitions = partitions
        self.node_link = node_link
        self.admin_link = admin_link

    def get_id(self):
        """Return the numeric node id."""
        return self.id

    def get_partitions(self):
        """Return the partitions hosted by this node."""
        return self.partitions

    def get_node_link(self):
        """Return the node's service URL."""
        return self.node_link

    def get_admin_link(self):
        """Return the node's admin URL."""
        return self.admin_link
class SenseiSystemInfo:
    """System information parsed from a Sensei sysinfo JSON response."""

    def __init__(self, json_data):
        # `json_data` is the already-parsed JSON payload of the sysinfo call.
        logger.debug("json_data = %s" % json_data)
        self.num_docs = int(json_data.get(PARAM_SYSINFO_NUMDOCS))
        self.last_modified = long(json_data.get(PARAM_SYSINFO_LASTMODIFIED))
        self.version = json_data.get(PARAM_SYSINFO_VERSION)
        # One SenseiFacetInfo per facet entry in the response.
        self.facet_infos = []
        for facet in json_data.get(PARAM_SYSINFO_FACETS):
            facet_info = SenseiFacetInfo(facet.get(PARAM_SYSINFO_FACETS_NAME),
                                         facet.get(PARAM_SYSINFO_FACETS_RUNTIME),
                                         facet.get(PARAM_SYSINFO_FACETS_PROPS))
            self.facet_infos.append(facet_info)
        # TODO: get cluster_info
        self.cluster_info = None

    def display(self):
        """Display sysinfo as an ASCII table on stdout."""
        keys = ["facet_name", "facet_type", "runtime", "column", "column_type", "depends"]
        max_lens = None
        # XXX add existing flags
        def get_max_lens(columns):
            # Compute the widest value per display column so the table
            # lines up.  NOTE(review): assumes every facet's props contain
            # "type", "column", "column_type" and "depends" -- a missing
            # (None) property would make len() raise TypeError; confirm
            # against the server's sysinfo schema.
            max_lens = {}
            for column in columns:
                max_lens[column] = len(column)
            for facet_info in self.facet_infos:
                props = facet_info.get_props()
                tmp_len = len(facet_info.get_name())
                if tmp_len > max_lens["facet_name"]:
                    max_lens["facet_name"] = tmp_len
                tmp_len = len(props.get("type"))
                if tmp_len > max_lens["facet_type"]:
                    max_lens["facet_type"] = tmp_len
                # runtime can only contain "true" or "false", so len("runtime")
                # is big enough
                tmp_len = len(props.get("column"))
                if tmp_len > max_lens["column"]:
                    max_lens["column"] = tmp_len
                tmp_len = len(props.get("column_type"))
                if tmp_len > max_lens["column_type"]:
                    max_lens["column_type"] = tmp_len
                tmp_len = len(props.get("depends"))
                if tmp_len > max_lens["depends"]:
                    max_lens["depends"] = tmp_len
            return max_lens
        max_lens = get_max_lens(keys)
        print_header(keys, max_lens)
        # One table row per facet; each cell is padded to the column width.
        for facet_info in self.facet_infos:
            props = facet_info.get_props()
            sys.stdout.write('|')
            val = facet_info.get_name()
            sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["facet_name"] - len(val))))
            val = props.get("type")
            sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["facet_type"] - len(val))))
            # Render the boolean runtime flag as "true"/"false".
            val = facet_info.get_runtime() and "true" or "false"
            sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["runtime"] - len(val))))
            val = props.get("column")
            sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["column"] - len(val))))
            val = props.get("column_type")
            sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["column_type"] - len(val))))
            val = props.get("depends")
            sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["depends"] - len(val))))
            sys.stdout.write('\n')
        print_footer(keys, max_lens)

    def get_num_docs(self):
        return self.num_docs

    def set_num_docs(self, num_docs):
        self.num_docs = num_docs

    def get_last_modified(self):
        return self.last_modified

    def set_last_modified(self, last_modified):
        self.last_modified = last_modified

    def get_facet_infos(self):
        return self.facet_infos

    def set_facet_infos(self, facet_infos):
        self.facet_infos = facet_infos

    def get_version(self):
        return self.version

    def set_version(self, version):
        self.version = version

    def get_cluster_info(self):
        return self.cluster_info

    def set_cluster_info(self, cluster_info):
        self.cluster_info = cluster_info
class SenseiRequest:
    """A Sensei search request.

    A request is either compiled from a parsed BQL statement (``bql_req``,
    which requires ``facet_map``) or built empty and populated through the
    setter/append methods below.
    """

    def __init__(self,
                 bql_req=None,
                 offset=DEFAULT_REQUEST_OFFSET,
                 count=DEFAULT_REQUEST_COUNT,
                 max_per_group=DEFAULT_REQUEST_MAX_PER_GROUP,
                 facet_map=None):
        self.qParam = {}
        self.explain = False
        self.route_param = None
        self.prepare_time = 0  # Statement prepare time in milliseconds
        self.stmt_type = "unknown"
        if bql_req != None:
            # Compile the request from the parsed BQL statement.
            assert(facet_map)
            time1 = datetime.now()  # XXX need to move to SenseiClient
            # ok, msg = bql_req.merge_selections()
            # if not ok:
            #     raise SenseiClientError(msg)
            self.stmt_type = bql_req.get_stmt_type()
            if self.stmt_type == "desc":
                self.index = bql_req.get_index()
            else:
                self.query = bql_req.get_query()
                self.offset = bql_req.get_offset() or offset
                self.count = bql_req.get_count() or count
                self.columns = bql_req.get_columns()
                self.sorts = bql_req.get_sorts()
                self.selections = bql_req.get_selections()
                self.filter = bql_req.get_filter()
                self.query_pred = bql_req.get_query_pred()
                self.facets = bql_req.get_facets()
                # PARAM_RESULT_HIT_STORED_FIELDS is a reserved column name. If this
                # column is selected, turn on fetch_stored flag automatically.
                if (PARAM_RESULT_HIT_STORED_FIELDS in self.columns or
                    bql_req.get_fetching_stored()):
                    self.fetch_stored = True
                else:
                    self.fetch_stored = False
                self.groupby = bql_req.get_groupby()
                self.max_per_group = bql_req.get_max_per_group() or max_per_group
                self.facet_init_param_map = bql_req.get_facet_init_param_map()
                delta = datetime.now() - time1
                self.prepare_time = delta.seconds * 1000 + delta.microseconds / 1000
                logger.debug("Prepare time: %sms" % self.prepare_time)
        else:
            # Empty request; everything is filled in via the setters.
            self.query = None
            self.offset = offset
            self.count = count
            self.columns = []
            self.sorts = None
            self.selections = []
            self.filter = {}
            self.query_pred = {}
            self.facets = {}
            self.fetch_stored = False
            self.groupby = None
            self.max_per_group = max_per_group
            self.facet_init_param_map = {}

    # -- Simple mutators ---------------------------------------------------

    def set_offset(self, offset):
        self.offset = offset

    def set_count(self, count):
        self.count = count

    def set_query(self, query):
        self.query = query

    def set_explain(self, explain):
        self.explain = explain

    def set_fetch_stored(self, fetch_stored):
        self.fetch_stored = fetch_stored

    def set_route_param(self, route_param):
        self.route_param = route_param

    def set_sorts(self, sorts):
        self.sorts = sorts

    def append_sort(self, sort):
        """Append a SenseiSort; non-SenseiSort values are ignored."""
        if isinstance(sort, SenseiSort):
            if self.sorts is None:
                self.sorts = []
            self.sorts.append(sort)

    def set_filter(self, filter):
        self.filter = filter

    def set_selections(self, selections):
        self.selections = selections

    def append_term_selection(self, column, value):
        """Append a single-value "term" selection for ``column``."""
        if self.selections is None:
            self.selections = []
        term_selection = {"term": {column: {"value": value}}}
        self.selections.append(term_selection)

    def append_terms_selection(self, column, values, excludes, operator):
        """Append a multi-value "terms" selection for ``column``.

        Bug fix: the original body referenced the undefined names ``value``
        and ``term_selection`` (instead of ``values``/``terms_selection``)
        and dropped ``excludes`` and ``operator`` entirely, so every call
        raised NameError.
        """
        if self.selections is None:
            self.selections = []
        terms_selection = {"terms": {column: {"values": values,
                                              "excludes": excludes,
                                              "operator": operator}}}
        self.selections.append(terms_selection)

    def append_range_selection(self, column, from_str="*", to_str="*", include_lower=True, include_upper=True):
        """Append a "range" selection; "*" means an open bound."""
        if self.selections is None:
            self.selections = []
        range_selection = {"range": {column: {"to": to_str, "from": from_str, "include_lower": include_lower, "include_upper": include_upper}}}
        self.selections.append(range_selection)

    def append_path_selection(self, column, value, strict=False, depth=1):
        """Append a hierarchical "path" selection for ``column``."""
        if self.selections is None:
            self.selections = []
        path_selection = {"path": {column: {"value": value, "strict": strict, "depth": depth}}}
        self.selections.append(path_selection)

    def set_facets(self, facets):
        self.facets = facets

    def set_groupby(self, groupby):
        self.groupby = groupby

    def set_max_per_group(self, max_per_group):
        self.max_per_group = max_per_group

    def set_facet_init_param_map(self, facet_init_param_map):
        self.facet_init_param_map = facet_init_param_map

    def get_columns(self):
        return self.columns
class SenseiHit:
    """One search hit returned by Sensei."""

    def __init__(self):
        self.docid = None
        self.uid = None
        self.srcData = {}
        self.score = None
        self.explanation = None
        self.stored = None

    def load(self, jsonHit):
        """Populate this hit from a parsed JSON hit object."""
        self.docid = jsonHit.get(PARAM_RESULT_HIT_DOCID)
        self.uid = jsonHit.get(PARAM_RESULT_HIT_UID)
        self.score = jsonHit.get(PARAM_RESULT_HIT_SCORE)
        self.explanation = jsonHit.get(PARAM_RESULT_HIT_EXPLANATION)
        self.stored = jsonHit.get(PARAM_RESULT_HIT_STORED_FIELDS)
        # The source document arrives as a JSON-encoded string; decode it
        # when present, otherwise record None.
        raw_src = jsonHit.get(PARAM_RESULT_HIT_SRC_DATA)
        self.srcData = json.loads(raw_src) if raw_src else None
class SenseiResultFacet:
    """One facet value/count entry in a search result."""

    # Class-level defaults; load() installs per-instance values.
    value = None
    count = None
    selected = None

    def load(self, json):
        """Populate from a parsed facet JSON object.

        Note: the parameter is named ``json`` (shadowing the stdlib module
        inside this method); kept for interface compatibility.
        """
        self.value = json.get(PARAM_RESULT_FACET_INFO_VALUE)
        self.count = json.get(PARAM_RESULT_FACET_INFO_COUNT)
        # "selected" may be absent from the payload; default to False.
        self.selected = json.get(PARAM_RESULT_FACET_INFO_SELECTED, False)
class SenseiResult:
    """Sensei search results for a query."""

    def __init__(self, json_data):
        # `json_data` is the parsed JSON response body from the server.
        logger.debug("json_data = %s" % json_data)
        self.jsonMap = json_data
        self.parsedQuery = json_data.get(PARAM_RESULT_PARSEDQUERY)
        self.totalDocs = json_data.get(PARAM_RESULT_TOTALDOCS, 0)
        self.time = json_data.get(PARAM_RESULT_TIME, 0)
        self.total_time = 0
        self.numHits = json_data.get(PARAM_RESULT_NUMHITS, 0)
        self.hits = json_data.get(PARAM_RESULT_HITS)
        self.error = json_data.get("error")
        # Build facetMap: facet name -> list of SenseiResultFacet objects.
        map = json_data.get(PARAM_RESULT_FACETS)
        self.facetMap = {}
        if map:
            for k, v in map.items():
                facetList = []
                for facet in v:
                    facetObj = SenseiResultFacet()
                    facetObj.load(facet)
                    facetList.append(facetObj)
                self.facetMap[k]=facetList

    def display(self, columns=['*'], max_col_width=40):
        """Print the results in SQL SELECT result format."""
        keys = []
        max_lens = None
        has_group_hits = False
        # NOTE(review): columns=['*'] is a mutable default argument; it is
        # only read here, never mutated, so it is harmless in practice.
        def get_max_lens(columns):
            # Compute the display width for each column (capped at
            # max_col_width) and detect whether any hit carries group hits.
            max_lens = {}
            has_group_hits = False
            for col in columns:
                max_lens[col] = len(col)
            for hit in self.hits:
                group_hits = [hit]
                if hit.has_key(GROUP_HITS):
                    group_hits = hit.get(GROUP_HITS)
                    has_group_hits = True
                for group_hit in group_hits:
                    for col in columns:
                        if group_hit.has_key(col):
                            v = group_hit.get(col)
                        else:
                            v = '<Not Found>'
                        if isinstance(v, list):
                            v = ','.join([safe_str(item) for item in v])
                        elif isinstance(v, (int, long, float)):
                            v = str(v)
                        value_len = len(v)
                        if value_len > max_lens[col]:
                            max_lens[col] = min(value_len, max_col_width)
            return max_lens, has_group_hits
        if not self.hits:
            print "No hit is found."
            return
        elif not columns:
            print "No column is selected."
            return
        # '*' selects every column except the group/src bookkeeping fields.
        if len(columns) == 1 and columns[0] == '*':
            keys = self.hits[0].keys()
            if GROUP_HITS in keys:
                keys.remove(GROUP_HITS)
            if GROUP_VALUE in keys:
                keys.remove(GROUP_VALUE)
            if PARAM_RESULT_HIT_SRC_DATA in keys:
                keys.remove(PARAM_RESULT_HIT_SRC_DATA)
        else:
            keys = columns
        max_lens, has_group_hits = get_max_lens(keys)
        # Grouped results use '='/'=' borders, plain results use '-'/'+'.
        print_header(keys, max_lens,
                     has_group_hits and '=' or '-',
                     has_group_hits and '=' or '+')
        # Print the results
        for hit in self.hits:
            group_hits = [hit]
            if hit.has_key(GROUP_HITS):
                group_hits = hit.get(GROUP_HITS)
            for group_hit in group_hits:
                sys.stdout.write('|')
                for key in keys:
                    if group_hit.has_key(key):
                        v = group_hit.get(key)
                    else:
                        v = '<Not Found>'
                    if isinstance(v, list):
                        v = ','.join([safe_str(item) for item in v])
                    elif isinstance(v, (int, float, long)):
                        v = str(v)
                    else:
                        # The value may contain unicode characters
                        v = safe_str(v)
                    # Truncate long values to the column width.
                    if len(v) > max_col_width:
                        v = v[:max_col_width]
                    sys.stdout.write(' %s%s |' % (v, ' ' * (max_lens[key] - len(v))))
                sys.stdout.write('\n')
            if has_group_hits:
                print_line(keys, max_lens)
        print_footer(keys, max_lens,
                     has_group_hits and '=' or '-',
                     has_group_hits and '=' or '+')
        # Summary line: rows/groups shown, hit count, doc count, timings.
        sys.stdout.write('%s %s%s in set, %s hit%s, %s total doc%s (server: %sms, total: %sms)\n' %
                         (len(self.hits),
                          has_group_hits and 'group' or 'row',
                          len(self.hits) > 1 and 's' or '',
                          self.numHits,
                          self.numHits > 1 and 's' or '',
                          self.totalDocs,
                          self.totalDocs > 1 and 's' or '',
                          self.time,
                          self.total_time
                          ))
        # Print facet information
        for facet, values in self.jsonMap.get(PARAM_RESULT_FACETS).iteritems():
            max_val_len = len(facet)
            max_count_len = 1
            for val in values:
                max_val_len = max(max_val_len, min(max_col_width, len(val.get('value'))))
                max_count_len = max(max_count_len, len(str(val.get('count'))))
            total_len = max_val_len + 2 + max_count_len + 3
            sys.stdout.write('+' + '-' * total_len + '+\n')
            sys.stdout.write('| ' + facet + ' ' * (total_len - len(facet) - 1) + '|\n')
            sys.stdout.write('+' + '-' * total_len + '+\n')
            for val in values:
                sys.stdout.write('| %s%s (%s)%s |\n' %
                                 (val.get('value'),
                                  ' ' * (max_val_len - len(val.get('value'))),
                                  val.get('count'),
                                  ' ' * (max_count_len - len(str(val.get('count'))))))
            sys.stdout.write('+' + '-' * total_len + '+\n')
| DataDog/sensei | clients/python/sensei/sensei_components.py | Python | apache-2.0 | 35,124 |
#!/usr/bin/python
# Copyright 2017 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from MenderAPI import *
class Inventory:
    """Thin test client for the Mender inventory management REST API."""

    auth = None

    def __init__(self, auth):
        self.reset()
        self.auth = auth

    def reset(self):
        # Reset all temporary values.
        pass

    def get_inv_base_path(self):
        """Return the inventory API base URL for the current gateway."""
        return "https://%s/api/management/%s/inventory/" % (get_mender_gateway(),
                                                            api_version)

    def get_devices(self, has_group=None):
        """get_devices API. has_group can be True/False/None string."""
        # Fix: the original fetched an auth token into an unused local
        # ("headers") and then requested a second token for the actual call.
        params = {}
        if has_group is not None:
            params = {"has_group": has_group}
        ret = requests.get(self.get_inv_base_path() + "devices",
                           params=params,
                           headers=self.auth.get_auth_token(),
                           verify=False)
        assert ret.status_code == requests.status_codes.codes.ok
        return ret.json()

    def get_device(self, device_id):
        """Return the raw response for a single device (no status assert).

        NOTE(review): this path uses the singular "device" segment while
        every other endpoint here uses "devices" -- confirm against the
        inventory API specification.
        """
        devurl = "%s%s/%s" % (self.get_inv_base_path(), "device", device_id)
        return requests.get(devurl, headers=self.auth.get_auth_token(),
                            verify=False)

    def get_groups(self):
        """Return all device groups."""
        ret = requests.get(self.get_inv_base_path() + "groups",
                           headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.ok
        return ret.json()

    def get_devices_in_group(self, group):
        """Return the devices that belong to ``group``."""
        req = "groups/%s/devices" % group
        ret = requests.get(self.get_inv_base_path() + req,
                           headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.ok
        return ret.json()

    def get_device_group(self, device):
        """Return the group assignment for ``device``."""
        req = "devices/%s/group" % device
        ret = requests.get(self.get_inv_base_path() + req,
                           headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.ok
        return ret.json()

    def put_device_in_group(self, device, group):
        """Assign ``device`` to ``group``."""
        headers = {"Content-Type": "application/json"}
        headers.update(self.auth.get_auth_token())
        body = '{"group":"%s"}' % group
        req = "devices/%s/group" % device
        ret = requests.put(self.get_inv_base_path() + req, data=body,
                           headers=headers, verify=False)
        assert ret.status_code == requests.status_codes.codes.no_content

    def delete_device_from_group(self, device, group):
        """Remove ``device`` from ``group``."""
        req = "devices/%s/group/%s" % (device, group)
        ret = requests.delete(self.get_inv_base_path() + req,
                              headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.no_content
| GregorioDiStefano/integration | tests/MenderAPI/inventory.py | Python | apache-2.0 | 3,224 |
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from . import TestScaleBase
class TestScaleCompute(TestScaleBase):
    """Scale-in workflow tests for compute nodes and their related db nodes.

    Each test deploys a blueprint, runs the ``scale`` workflow with the
    given parameters, and checks per-node operation expectations via
    ``deployment_assertions``.  The expectations mapping is indexed as
    expectations[node]['new'|'existing'|'removed'][operation] -- presumably
    the expected count of that lifecycle operation for instances that were
    newly created, kept, or removed (semantics defined in TestScaleBase).
    """

    def test_compute_scale_in_compute(self):
        """Scale a 3-instance compute node in by one (3 -> 2)."""
        expectations = self.deploy_app('scale4')
        expectations['compute']['new']['install'] = 3
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute',
            'delta': -1})
        expectations['compute']['existing']['install'] = 2
        expectations['compute']['removed']['install'] = 1
        expectations['compute']['removed']['uninstall'] = 1
        self.deployment_assertions(expectations)

    def test_compute_scale_in_compute_ignore_failure_true(self):
        """Scale in succeeds despite a failing stop when ignore_failure=True."""
        expectations = self.deploy_app('scale_ignore_failure')
        expectations['compute']['new']['install'] = 3
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute',
            'ignore_failure': True,
            'delta': -1})
        expectations['compute']['existing']['install'] = 2
        expectations['compute']['removed']['install'] = 1
        expectations['compute']['removed']['uninstall'] = 1
        self.deployment_assertions(expectations)

    def test_compute_scale_in_compute_ignore_failure_false(self):
        """Scale in fails on a failing stop when ignore_failure=False."""
        expectations = self.deploy_app('scale_ignore_failure')
        expectations['compute']['new']['install'] = 3
        self.deployment_assertions(expectations)
        try:
            self.scale(parameters={
                'scalable_entity_name': 'compute',
                'ignore_failure': False,
                'delta': -1})
        except RuntimeError as e:
            # The mocked stop operation is expected to fail the workflow.
            self.assertIn(
                "RuntimeError: Workflow failed: Task failed "
                "'testmockoperations.tasks.mock_stop_failure'",
                str(e))
        else:
            self.fail()

    def test_compute_scale_out_and_in_compute_from_0(self):
        """Scale a 0-instance compute node out to 1 and back in to 0."""
        expectations = self.deploy_app('scale10')
        expectations['compute']['new']['install'] = 0
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute'})
        expectations['compute']['new']['install'] = 1
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute',
            'delta': -1})
        expectations['compute']['new']['install'] = 0
        expectations['compute']['existing']['install'] = 0
        expectations['compute']['removed']['install'] = 1
        expectations['compute']['removed']['uninstall'] = 1
        self.deployment_assertions(expectations)

    def test_compute_scale_in_2_compute(self):
        """Scale a 3-instance compute node in by two (3 -> 1)."""
        expectations = self.deploy_app('scale4')
        expectations['compute']['new']['install'] = 3
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute',
            'delta': -2})
        expectations['compute']['existing']['install'] = 1
        expectations['compute']['removed']['install'] = 2
        expectations['compute']['removed']['uninstall'] = 2
        self.deployment_assertions(expectations)

    def test_db_contained_in_compute_scale_in_compute(self):
        """Scaling in compute also removes the db instances contained in it."""
        expectations = self.deploy_app('scale5')
        expectations['compute']['new']['install'] = 2
        expectations['db']['new']['install'] = 4
        expectations['db']['new']['rel_install'] = 8
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute',
            'delta': -1})
        expectations['compute']['existing']['install'] = 1
        expectations['compute']['removed']['install'] = 1
        expectations['compute']['removed']['uninstall'] = 1
        expectations['db']['existing']['install'] = 2
        expectations['db']['existing']['rel_install'] = 4
        expectations['db']['removed']['install'] = 2
        expectations['db']['removed']['uninstall'] = 2
        expectations['db']['removed']['rel_install'] = 4
        expectations['db']['removed']['rel_uninstall'] = 4
        self.deployment_assertions(expectations)

    def test_db_connected_to_compute_scale_in_db(self):
        """Scaling in a connected db leaves the computes untouched."""
        expectations = self.deploy_app('scale6')
        expectations['compute']['new']['install'] = 2
        expectations['db']['new']['install'] = 2
        expectations['db']['new']['rel_install'] = 8
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'db',
            'delta': -1})
        expectations['compute']['existing']['install'] = 2
        expectations['db']['existing']['install'] = 1
        expectations['db']['existing']['rel_install'] = 4
        expectations['db']['removed']['install'] = 1
        expectations['db']['removed']['uninstall'] = 1
        expectations['db']['removed']['rel_install'] = 4
        expectations['db']['removed']['rel_uninstall'] = 4
        self.deployment_assertions(expectations)

    def test_db_connected_to_compute_scale_in_compute(self):
        """Scaling in compute unlinks the connected db relationships."""
        expectations = self.deploy_app('scale6')
        expectations['compute']['new']['install'] = 2
        expectations['db']['new']['install'] = 2
        expectations['db']['new']['rel_install'] = 8
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute',
            'delta': -1})
        expectations['compute']['existing']['install'] = 1
        expectations['compute']['removed']['install'] = 1
        expectations['compute']['removed']['uninstall'] = 1
        expectations['db']['existing']['install'] = 2
        expectations['db']['existing']['rel_install'] = 8
        expectations['db']['existing']['rel_uninstall'] = 4
        self.deployment_assertions(expectations)

    def test_db_connected_to_compute_scale_in_and_out_compute_from_0(self):
        """Scale compute 0 -> 1 -> 0 with a db connected to it."""
        expectations = self.deploy_app('scale11')
        expectations['compute']['new']['install'] = 0
        expectations['db']['new']['install'] = 1
        expectations['db']['new']['rel_install'] = 0
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute',
            'delta': 1})
        expectations['compute']['new']['install'] = 1
        expectations['compute']['existing']['install'] = 0
        expectations['db']['existing']['install'] = 1
        expectations['db']['existing']['rel_install'] = 0
        expectations['db']['existing']['scale_rel_install'] = 2
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'compute',
            'delta': -1})
        expectations['compute']['new']['install'] = 0
        expectations['compute']['existing']['install'] = 0
        expectations['compute']['removed']['install'] = 1
        expectations['compute']['removed']['uninstall'] = 1
        expectations['db']['existing']['install'] = 1
        expectations['db']['existing']['scale_rel_install'] = 2
        expectations['db']['existing']['rel_uninstall'] = 2
        self.deployment_assertions(expectations)

    def test_db_contained_in_compute_scale_in_db_scale_db(self):
        """Scale in db only (scale_compute=False); computes stay put."""
        expectations = self.deploy_app('scale5')
        expectations['compute']['new']['install'] = 2
        expectations['db']['new']['install'] = 4
        expectations['db']['new']['rel_install'] = 8
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'db',
            'delta': -1,
            'scale_compute': False})
        expectations['compute']['existing']['install'] = 2
        expectations['db']['existing']['install'] = 2
        expectations['db']['existing']['rel_install'] = 4
        expectations['db']['removed']['install'] = 2
        expectations['db']['removed']['uninstall'] = 2
        expectations['db']['removed']['rel_install'] = 4
        expectations['db']['removed']['rel_uninstall'] = 4
        self.deployment_assertions(expectations)

    def test_db_contained_in_compute_scale_in_db(self):
        """Scale in db with scale_compute=True; the host compute goes too."""
        expectations = self.deploy_app('scale5')
        expectations['compute']['new']['install'] = 2
        expectations['db']['new']['install'] = 4
        expectations['db']['new']['rel_install'] = 8
        self.deployment_assertions(expectations)
        expectations = self.scale(parameters={
            'scalable_entity_name': 'db',
            'delta': -1,
            'scale_compute': True})
        expectations['compute']['existing']['install'] = 1
        expectations['compute']['removed']['install'] = 1
        expectations['compute']['removed']['uninstall'] = 1
        expectations['db']['existing']['install'] = 2
        expectations['db']['existing']['rel_install'] = 4
        expectations['db']['removed']['install'] = 2
        expectations['db']['removed']['uninstall'] = 2
        expectations['db']['removed']['rel_install'] = 4
        expectations['db']['removed']['rel_uninstall'] = 4
        self.deployment_assertions(expectations)
| isaac-s/cloudify-manager | tests/integration_tests/tests/agentless_tests/scale/test_scale_in.py | Python | apache-2.0 | 9,915 |
#!/usr/bin/env python
"""Test for the ee.__init__ file."""
import six
import unittest
import ee
from ee import apitestcase
class EETestCase(apitestcase.ApiTestCase):
  """Tests for the top-level ee module: initialization, call/apply, and
  dynamically generated algorithm classes."""

  def setUp(self):
    # Start every test from a clean, uninitialized library state.
    ee.Reset()
  def testInitialization(self):
    """Verifies library initialization."""

    def MockSend(path, params, unused_method=None, unused_raw=None):
      # Stub API server: only the algorithms listing may be requested.
      if path == '/algorithms':
        return {}
      else:
        raise Exception('Unexpected API call to %s with %s' % (path, params))
    ee.data.send_ = MockSend

    # Verify that the base state is uninitialized.
    self.assertFalse(ee.data._initialized)
    self.assertEqual(ee.data._api_base_url, None)
    self.assertEqual(ee.ApiFunction._api, None)
    self.assertFalse(ee.Image._initialized)

    # Verify that ee.Initialize() sets the URL and initializes classes.
    ee.Initialize(None, 'foo')
    self.assertTrue(ee.data._initialized)
    self.assertEqual(ee.data._api_base_url, 'foo/api')
    self.assertEqual(ee.ApiFunction._api, {})
    self.assertTrue(ee.Image._initialized)

    # Verify that ee.Initialize(None) does not override custom URLs.
    ee.Initialize(None)
    self.assertTrue(ee.data._initialized)
    self.assertEqual(ee.data._api_base_url, 'foo/api')

    # Verify that ee.Reset() reverts everything to the base state.
    ee.Reset()
    self.assertFalse(ee.data._initialized)
    self.assertEqual(ee.data._api_base_url, None)
    self.assertEqual(ee.ApiFunction._api, None)
    self.assertFalse(ee.Image._initialized)
  def testCallAndApply(self):
    """Verifies ee.call() and ee.apply() with built-in and custom functions."""

    # Use a custom set of known functions.
    def MockSend(path, params, unused_method=None, unused_raw=None):
      if path == '/algorithms':
        return {
            'fakeFunction': {
                'type': 'Algorithm',
                'args': [
                    {'name': 'image1', 'type': 'Image'},
                    {'name': 'image2', 'type': 'Image'}
                ],
                'returns': 'Image'
            },
            'Image.constant': apitestcase.BUILTIN_FUNCTIONS['Image.constant']
        }
      else:
        raise Exception('Unexpected API call to %s with %s' % (path, params))
    ee.data.send_ = MockSend
    ee.Initialize(None)

    image1 = ee.Image(1)
    image2 = ee.Image(2)
    expected = ee.Image(ee.ComputedObject(
        ee.ApiFunction.lookup('fakeFunction'),
        {'image1': image1, 'image2': image2}))

    # Plain numbers must be promoted to Images by call/apply.
    applied_with_images = ee.apply(
        'fakeFunction', {'image1': image1, 'image2': image2})
    self.assertEqual(expected, applied_with_images)

    applied_with_numbers = ee.apply('fakeFunction', {'image1': 1, 'image2': 2})
    self.assertEqual(expected, applied_with_numbers)

    called_with_numbers = ee.call('fakeFunction', 1, 2)
    self.assertEqual(expected, called_with_numbers)

    # Test call and apply() with a custom function.
    sig = {'returns': 'Image', 'args': [{'name': 'foo', 'type': 'Image'}]}
    func = ee.CustomFunction(sig, lambda foo: ee.call('fakeFunction', 42, foo))
    expected_custom_function_call = ee.Image(
        ee.ComputedObject(func, {'foo': ee.Image(13)}))
    self.assertEqual(expected_custom_function_call, ee.call(func, 13))
    self.assertEqual(expected_custom_function_call, ee.apply(func, {'foo': 13}))

    # Test None promotion.
    called_with_null = ee.call('fakeFunction', None, 1)
    self.assertEqual(None, called_with_null.args['image1'])
  def testDynamicClasses(self):
    """Verifies dynamic class initialization."""

    # Use a custom set of known functions.  The algorithm signatures below
    # should cause the classes Array, Kernel and Reducer to be generated.
    def MockSend(path, unused_params, unused_method=None, unused_raw=None):
      if path == '/algorithms':
        return {
            'Array': {
                'type': 'Algorithm',
                'args': [
                    {
                        'name': 'values',
                        'type': 'Serializable',
                        'description': ''
                    }
                ],
                'description': '',
                'returns': 'Array'
            },
            'Array.cos': {
                'type': 'Algorithm',
                'args': [
                    {
                        'type': 'Array',
                        'description': '',
                        'name': 'input'
                    }
                ],
                'description': '',
                'returns': 'Array'
            },
            'Kernel.circle': {
                'returns': 'Kernel',
                'args': [
                    {
                        'type': 'float',
                        'description': '',
                        'name': 'radius',
                    },
                    {
                        'default': 1.0,
                        'type': 'float',
                        'optional': True,
                        'description': '',
                        'name': 'scale'
                    },
                    {
                        'default': True,
                        'type': 'boolean',
                        'optional': True,
                        'description': '',
                        'name': 'normalize'
                    }
                ],
                'type': 'Algorithm',
                'description': ''
            },
            'Reducer.mean': {
                'returns': 'Reducer',
                'args': []
            },
            'fakeFunction': {
                'returns': 'Array',
                'args': [
                    {
                        'type': 'Reducer',
                        'description': '',
                        'name': 'kernel',
                    }
                ]
            }
        }
    ee.data.send_ = MockSend
    ee.Initialize(None)

    # Verify that the expected classes got generated.
    self.assertTrue(hasattr(ee, 'Array'))
    self.assertTrue(hasattr(ee, 'Kernel'))
    self.assertTrue(hasattr(ee.Array, 'cos'))
    self.assertTrue(hasattr(ee.Kernel, 'circle'))

    # Try out the constructors.
    kernel = ee.ApiFunction('Kernel.circle').call(1, 2)
    self.assertEqual(kernel, ee.Kernel.circle(1, 2))

    array = ee.ApiFunction('Array').call([1, 2])
    self.assertEqual(array, ee.Array([1, 2]))
    self.assertEqual(array, ee.Array(ee.Array([1, 2])))

    # Try out the member function.
    self.assertEqual(
        ee.ApiFunction('Array.cos').call(array),
        ee.Array([1, 2]).cos())

    # Test argument promotion.
    f1 = ee.ApiFunction('Array.cos').call([1, 2])
    f2 = ee.ApiFunction('Array.cos').call(ee.Array([1, 2]))
    self.assertEqual(f1, f2)
    self.assertTrue(isinstance(f1, ee.Array))

    # A string argument for a Reducer parameter is promoted to Reducer.<name>().
    f3 = ee.call('fakeFunction', 'mean')
    f4 = ee.call('fakeFunction', ee.Reducer.mean())
    self.assertEqual(f3, f4)

    # An unknown reducer name must raise.
    try:
      ee.call('fakeFunction', 'moo')
      self.fail()
    except ee.EEException as e:
      self.assertTrue('Unknown algorithm: Reducer.moo' in str(e))
  def testDynamicConstructor(self):
    """Verifies the behavior of the dynamically generated class constructor."""
    # Test the behavior of the dynamic class constructor.

    # Use a custom set of known functions for classes Foo and Bar.
    #   Foo Foo(arg1, [arg2])
    #   Bar Foo.makeBar()
    #   Bar Foo.takeBar(Bar bar)
    #   Baz Foo.baz()
    def MockSend(path, unused_params, unused_method=None, unused_raw=None):
      if path == '/algorithms':
        return {
            'Foo': {
                'returns': 'Foo',
                'args': [
                    {'name': 'arg1', 'type': 'Object'},
                    {'name': 'arg2', 'type': 'Object', 'optional': True}
                ]
            },
            'Foo.makeBar': {
                'returns': 'Bar',
                'args': [{'name': 'foo', 'type': 'Foo'}]
            },
            'Foo.takeBar': {
                'returns': 'Bar',
                'args': [
                    {'name': 'foo', 'type': 'Foo'},
                    {'name': 'bar', 'type': 'Bar'}
                ]
            },
            'Bar.baz': {
                'returns': 'Baz',
                'args': [{'name': 'bar', 'type': 'Bar'}]
            }
        }
    ee.data.send_ = MockSend
    ee.Initialize(None)

    # Try to cast something that's already of the right class.
    x = ee.Foo('argument')
    self.assertEqual(ee.Foo(x), x)

    # Tests for dynamic classes, where there is a constructor.
    #
    # If there's more than 1 arg, call the constructor.
    x = ee.Foo('a')
    y = ee.Foo(x, 'b')
    ctor = ee.ApiFunction.lookup('Foo')
    self.assertEqual(y.func, ctor)
    self.assertEqual(y.args, {'arg1': x, 'arg2': 'b'})

    # Can't cast a primitive; call the constructor.
    self.assertEqual(ctor, ee.Foo(1).func)

    # A computed object, but not this class; call the constructor.
    self.assertEqual(ctor, ee.Foo(ee.List([1, 2, 3])).func)

    # Tests for dynamic classes, where there isn't a constructor.
    #
    # Foo.makeBar and Foo.takeBar should have caused Bar to be generated.
    self.assertTrue(hasattr(ee, 'Bar'))

    # Make sure we can create a Bar.
    bar = ee.Foo(1).makeBar()
    self.assertTrue(isinstance(bar, ee.Bar))

    # Now cast something else to a Bar and verify it was just a cast.
    cast = ee.Bar(ee.Foo(1))
    self.assertTrue(isinstance(cast, ee.Bar))
    self.assertEqual(ctor, cast.func)

    # We shouldn't be able to cast with more than 1 arg.
    try:
      ee.Bar(x, 'foo')
      self.fail('Expected an exception.')
    except ee.EEException as e:
      self.assertTrue('Too many arguments for ee.Bar' in str(e))

    # We shouldn't be able to cast a primitive.
    try:
      ee.Bar(1)
      self.fail('Expected an exception.')
    except ee.EEException as e:
      self.assertTrue('Must be a ComputedObject' in str(e))
def testDynamicConstructorCasting(self):
"""Test the behavior of casting with dynamic classes."""
self.InitializeApi()
result = ee.Geometry.Rectangle(1, 1, 2, 2).bounds(0, 'EPSG:4326')
expected = (ee.Geometry.Polygon([[1, 2], [1, 1], [2, 1], [2, 2]])
.bounds(ee.ErrorMargin(0), ee.Projection('EPSG:4326')))
self.assertEqual(expected, result)
  def testPromotion(self):
    """Verifies object promotion rules."""
    self.InitializeApi()

    # Features and Images are both already Elements.
    self.assertTrue(isinstance(ee._Promote(ee.Feature(None), 'Element'),
                               ee.Feature))
    self.assertTrue(isinstance(ee._Promote(ee.Image(0), 'Element'), ee.Image))

    # Promote an untyped object to an Element.
    untyped = ee.ComputedObject('foo', {})
    self.assertTrue(isinstance(ee._Promote(untyped, 'Element'), ee.Element))

    # Promote an untyped variable to an Element; the variable name must
    # survive the promotion unchanged.
    untyped = ee.ComputedObject(None, None, 'foo')
    self.assertTrue(isinstance(ee._Promote(untyped, 'Element'), ee.Element))
    self.assertEqual('foo', ee._Promote(untyped, 'Element').varName)
  def testUnboundMethods(self):
    """Verifies unbound method attachment to ee.Algorithms."""

    # Use a custom set of known functions: a top-level Foo, a nested
    # Foo.bar, a Quux.baz (imported as a bound API below), and 'last'.
    def MockSend(path, unused_params, unused_method=None, unused_raw=None):
      if path == '/algorithms':
        return {
            'Foo': {
                'type': 'Algorithm',
                'args': [],
                'description': '',
                'returns': 'Object'
            },
            'Foo.bar': {
                'type': 'Algorithm',
                'args': [],
                'description': '',
                'returns': 'Object'
            },
            'Quux.baz': {
                'type': 'Algorithm',
                'args': [],
                'description': '',
                'returns': 'Object'
            },
            'last': {
                'type': 'Algorithm',
                'args': [],
                'description': '',
                'returns': 'Object'
            }
        }
    ee.data.send_ = MockSend

    # Quux is imported as a bound API first; the assertion below checks it
    # is therefore excluded from the unbound ee.Algorithms namespace.
    ee.ApiFunction.importApi(lambda: None, 'Quux', 'Quux')
    ee._InitializeUnboundMethods()

    self.assertTrue(callable(ee.Algorithms.Foo))
    self.assertTrue(callable(ee.Algorithms.Foo.bar))
    self.assertTrue('Quux' not in ee.Algorithms)
    # Calling the attached unbound method is equivalent to ee.call().
    self.assertEqual(ee.call('Foo.bar'), ee.Algorithms.Foo.bar())
    self.assertNotEqual(ee.Algorithms.Foo.bar(), ee.Algorithms.last())
def testNonAsciiDocumentation(self):
"""Verifies that non-ASCII characters in documentation work."""
foo = u'\uFB00\u00F6\u01EB'
bar = u'b\u00E4r'
baz = u'b\u00E2\u00DF'
def MockSend(path, unused_params, unused_method=None, unused_raw=None):
if path == '/algorithms':
return {
'Foo': {
'type': 'Algorithm',
'args': [],
'description': foo,
'returns': 'Object'
},
'Image.bar': {
'type': 'Algorithm',
'args': [{
'name': 'bar',
'type': 'Bar',
'description': bar
}],
'description': '',
'returns': 'Object'
},
'Image.oldBar': {
'type': 'Algorithm',
'args': [],
'description': foo,
'returns': 'Object',
'deprecated': 'Causes fire'
},
'Image.baz': {
'type': 'Algorithm',
'args': [],
'description': baz,
'returns': 'Object'
}
}
ee.data.send_ = MockSend
ee.Initialize(None)
# The initialisation shouldn't blow up.
self.assertTrue(callable(ee.Algorithms.Foo))
self.assertTrue(callable(ee.Image.bar))
self.assertTrue(callable(ee.Image.baz))
self.assertTrue(callable(ee.Image.baz))
# In Python 2, the docstrings end up UTF-8 encoded. In Python 3, they remain
# Unicode.
if six.PY3:
self.assertEqual(ee.Algorithms.Foo.__doc__, foo)
self.assertIn(foo, ee.Image.oldBar.__doc__)
self.assertIn('DEPRECATED: Causes fire', ee.Image.oldBar.__doc__)
self.assertEqual(ee.Image.bar.__doc__, '\n\nArgs:\n bar: ' + bar)
self.assertEqual(ee.Image.baz.__doc__, baz)
else:
self.assertEqual(ee.Algorithms.Foo.__doc__,
'\xef\xac\x80\xc3\xb6\xc7\xab')
self.assertIn('\xef\xac\x80\xc3\xb6\xc7\xab', ee.Image.oldBar.__doc__)
self.assertIn('DEPRECATED: Causes fire', ee.Image.oldBar.__doc__)
self.assertEqual(ee.Image.bar.__doc__, '\n\nArgs:\n bar: b\xc3\xa4r')
self.assertEqual(ee.Image.baz.__doc__, 'b\xc3\xa2\xc3\x9f')
  def testDatePromtion(self):
    """Checks that values read from a Feature promote to Date arguments.

    NOTE(review): the method name has a typo ("Promtion"); it is still
    discovered by unittest via the "test" prefix and is left unrenamed here
    to keep the block's public name unchanged.
    """
    # Make a feature, put a time in it, and get it out as a date.
    self.InitializeApi()
    point = ee.Geometry.Point(1, 2)
    feature = ee.Feature(point, {'x': 1, 'y': 2})
    date_range = ee.call('DateRange', feature.get('x'), feature.get('y'))

    # Check that the start and end args are wrapped in a call to Date.
    self.assertEqual(date_range.args['start'].func._signature['name'], 'Date')
    self.assertEqual(date_range.args['end'].func._signature['name'], 'Date')
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| tylere/earthengine-api | python/ee/tests/ee_test.py | Python | apache-2.0 | 15,097 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""Tests for nitroml.automl.metalearning.metalearner.component."""
from absl.testing import absltest
from nitroml.automl.metalearning import artifacts
from nitroml.automl.metalearning.metalearner.component import MetaLearner
from tfx.types import channel_utils
from tfx.types import standard_artifacts
class ComponentTest(absltest.TestCase):
  """Checks that MetaLearner declares the expected output channel types."""

  def setUp(self):
    super(ComponentTest, self).setUp()
    num_train = 5
    # Fake meta-training inputs: one HyperParameters channel and one
    # MetaFeatures channel per meta-training dataset.
    self.meta_train_data = {}
    for ix in range(num_train):
      self.meta_train_data[f'hparams_train_{ix}'] = channel_utils.as_channel(
          [standard_artifacts.HyperParameters()])
      self.meta_train_data[
          f'meta_train_features_{ix}'] = channel_utils.as_channel(
              [artifacts.MetaFeatures()])
    self.custom_config = {'some': 'thing', 'some other': 1, 'thing': 2}

  def _check_output_types(self, metalearner):
    """Asserts the component exposes the expected output artifact types.

    Extracted because both construction tests previously duplicated these
    assertions verbatim.
    """
    self.assertEqual(artifacts.KCandidateHyperParameters.TYPE_NAME,
                     metalearner.outputs['output_hyperparameters'].type_name)
    self.assertEqual(standard_artifacts.Model.TYPE_NAME,
                     metalearner.outputs['metamodel'].type_name)

  def testConstructWithMajorityVoting(self):
    metalearner = MetaLearner(
        algorithm='majority_voting',
        custom_config=self.custom_config,
        **self.meta_train_data)
    self._check_output_types(metalearner)

  def testConstructWithNearestNeighbor(self):
    metalearner = MetaLearner(
        algorithm='nearest_neighbor',
        custom_config=self.custom_config,
        **self.meta_train_data)
    self._check_output_types(metalearner)


if __name__ == '__main__':
  absltest.main()
| google/nitroml | nitroml/automl/metalearning/metalearner/component_test.py | Python | apache-2.0 | 2,460 |
#!/usr/bin/python3
"""Run 'adb devices' and show results in friendly way.
Runs 'adb devices' and integrates the results with environment
variables DEVTAGS and ANDROID_SERIAL to show model numbers for
connected devices.
"""
import getopt
import os
import re
import sys
import script_utils as u
# Device states from `adb devices` output that we recognize; anything else
# triggers a warning when lines are parsed.
valid_dispositions = {"device": 1,
                      "unauthorized": 1}

# Set by the -a command line flag: also list known but unconnected devices.
flag_showall = False
def read_devtags():
  """Read and post-process the DEVTAGS environment variable.

  DEVTAGS is a whitespace-separated list of tag:serial pairs, e.g.
  "pixel:0123abcd nexus:89ef4567".

  Returns:
    Pair of dicts (serial_to_tag, tag_to_serial).
  """
  dt = os.getenv("DEVTAGS")
  if dt is None:
    # BUG FIX: previously an unset DEVTAGS crashed with an AttributeError
    # on dt.split(); report a proper error instead.
    u.error("DEVTAGS environment variable not set")
  sertotag = {}
  tagtoser = {}
  # split() (instead of split(" ")) tolerates repeated whitespace, which
  # previously produced empty chunks and an unpack error.
  for chunk in dt.split():
    (tag, ser) = chunk.split(":")
    if ser in sertotag:
      u.error("malformed DEVTAGS (more than one "
              "entry for serial number %s)" % ser)
    if tag in tagtoser:
      u.warning("malformed DEVTAGS (more than one "
                "serial number for tag %s)" % tag)
    sertotag[ser] = tag
    tagtoser[tag] = ser
  return (sertotag, tagtoser)
def perform():
  """Main driver routine: parse `adb devices` output and print a friendly
  table (tag, serial, disposition), marking the ANDROID_SERIAL device."""
  # The currently-selected device (if any) is marked with ">>" below.
  andser = os.getenv("ANDROID_SERIAL")
  if andser:
    andser = andser.strip()
  else:
    andser = ""
  (serial_to_tag, tag_to_serial) = read_devtags()
  lines = u.docmdlines("adb devices")
  # Lines emitted when adb spawns its background daemon; skip them.
  rxd1 = re.compile(r"^\* daemon not running.+$")
  rxd2 = re.compile(r"^\* daemon started.+$")
  # Device line: "<serial> <disposition>".
  rx1 = re.compile(r"^\s*(\S+)\s+(\S+)\s*$")
  devices_found = {}
  # lines[0] is the "List of devices attached" header.
  for line in lines[1:]:
    if rxd1.match(line) or rxd2.match(line):
      continue
    m = rx1.match(line)
    if not m:
      u.warning("unable to match adb output line: %s" % line)
      continue
    ser = m.group(1)
    disp = m.group(2)
    if disp not in valid_dispositions:
      u.warning("unknown device disposition %s in adb "
                "output line: %s" % (disp, line))
    sel = ""
    if ser == andser:
      sel = ">>"
    # Devices absent from DEVTAGS get a placeholder tag.
    if ser not in serial_to_tag:
      tag = "???"
    else:
      tag = serial_to_tag[ser]
    devices_found[tag] = 1
    print("%2s %8s %16s %s" % (sel, tag, ser, disp))
  # With -a, also show DEVTAGS entries that are not currently connected.
  if flag_showall:
    for tag, ser in tag_to_serial.items():
      if tag in devices_found:
        continue
      print("%2s %8s %16s %s" % ("", tag, ser, "<unconnected>"))
def usage(msgarg=None):
  """Emit an optional error message plus usage text, then exit non-zero."""
  if msgarg:
    sys.stderr.write("error: %s\n" % msgarg)
  me = os.path.basename(sys.argv[0])
  print("""\
usage: %s [options]
options:
-d increase debug msg verbosity level
-a show disposition for all devices, not just those connected
""" % me)
  sys.exit(1)
def parse_args():
  """Parse command line flags, updating module-level option state."""
  global flag_showall
  try:
    options, _ = getopt.getopt(sys.argv[1:], "da")
  except getopt.GetoptError as err:
    # unrecognized option
    usage(str(err))
  for flag, _unused in options:
    if flag == "-a":
      flag_showall = True
    elif flag == "-d":
      u.increment_verbosity()
# ---------main portion of script -------------
# NOTE(review): setdeflanglocale presumably pins LANG/locale so adb output
# parses predictably -- see script_utils for the exact behavior.
u.setdeflanglocale()
parse_args()
# Check to make sure we can run adb
u.doscmd("which adb")
# run
perform()
# done
exit(0)
| thanm/devel-scripts | showdevices.py | Python | apache-2.0 | 2,992 |
import os
import json
import logging
# Directory (relative to the application entry point) that holds config files.
_DEFAULT_CONFIG_DIR = "config"
# Default application configuration file name.
_DEFAULT_CONFIG_NAME = "config.json"
# Default logging configuration file name.
_DEFAULT_LOG_CONFIG_NAME = "default_log.json"
######################################################################################################
def load(path):
    """Parse the JSON file at *path* and wrap it in a Configuration."""
    content = _load_json(path)
    logging.info("loaded configuration from [ %s ]", path)
    return Configuration(content)
def _load_json(path):
with file(path) as f:
content = json.load(f)
return content
def get_default_config(path):
    """Return the absolute path of the default config file next to *path*."""
    return os.path.abspath(
        os.path.join(get_config_dir(path), _DEFAULT_CONFIG_NAME))
def get_config_dir(path):
    """Return the absolute path of the config directory beside *path*."""
    parent = os.path.dirname(path)
    return os.path.abspath(os.path.join(parent, _DEFAULT_CONFIG_DIR))
def get_default_log_config(path):
    """Return the absolute path of the default log config next to *path*."""
    return os.path.abspath(
        os.path.join(get_config_dir(path), _DEFAULT_LOG_CONFIG_NAME))
######################################################################################################
class ConfigurationException(Exception):
    """Error raised for missing or invalid configuration data.

    The message may be a %-style format string; the positional args are
    interpolated lazily when the exception is rendered as a string.
    """

    def __init__(self, message, *args):
        self.message = message
        self.args = args

    def _to_string(self):
        text = str(self.message)
        return text % self.args if self.args else text

    def __str__(self):
        return self._to_string()
######################################################################################################
class Configuration(object):
    """Read-only accessor over nested JSON configuration content.

    Keys are dotted paths such as ``"server.port"``; each dot descends one
    level into the underlying dict.
    """

    def __init__(self, content):
        # Parsed JSON content (normally a dict of nested dicts).
        self._content = content

    ######################################################################
    def get(self, key):
        """Return the value at *key*; raise ConfigurationException when the
        key is absent (or maps to JSON null)."""
        value = self.get_default(key, None)
        if value is None:
            # BUG FIX: the original message interpolated self._path, an
            # attribute that is never set anywhere, so every missing-key
            # lookup died with an AttributeError instead of raising this
            # exception.
            raise ConfigurationException(
                "can't find configuration key [ %s ]", key)
        return value

    def get_default(self, key, default_value=None):
        """Return the value at *key*, or *default_value* when absent."""
        self._valid_content()
        # Use a real list: _find() mutates it with remove(), which the lazy
        # map() object returned on Python 3 does not support.
        path = [part.strip() for part in key.split('.')]
        value = self._find(self._content, path)
        if value is None:
            return default_value
        return value

    ######################################################################
    def as_int(self, key):
        """Return the value at *key* coerced to int."""
        return int(self.get(key))

    def as_int_default(self, key, default_value=None):
        """Return the value at *key* (or the default) coerced to int."""
        return int(self.get_default(key, default_value))

    def as_float(self, key):
        """Return the value at *key* coerced to float."""
        return float(self.get(key))

    def as_float_default(self, key, default_value=None):
        """Return the value at *key* (or the default) coerced to float."""
        return float(self.get_default(key, default_value))

    def as_string(self, key):
        """Return the value at *key* coerced to str."""
        return str(self.get(key))

    def as_string_default(self, key, default_value):
        """Return the value at *key* (or the default) coerced to str."""
        return str(self.get_default(key, default_value))

    ######################################################################
    def _valid_content(self):
        """Raise if no configuration content has been loaded."""
        if self._content is None:
            raise ConfigurationException(
                "configuration content is empty or not loaded!")

    def _find(self, element, path):
        """Recursively descend *element* following *path*; None if absent."""
        if not path:
            return element
        next_key = path[0]
        if next_key in element:
            path.remove(next_key)
            return self._find(element[next_key], path)
        return None
| dayaftereh/scripts | python/tcu/lib/config.py | Python | apache-2.0 | 3,690 |
# @license
# Copyright 2020 Google LLC. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# This file is 1/2 of the test suites for CUJ: convert->predict.
#
# This file does below things:
# - Create saved models with TensorFlow.
# - Convert the saved models to tfjs format and store in files.
# - Store inputs in files.
# - Make inference and store outputs in files.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import json
import os
import subprocess
import shutil
import sys
import tempfile
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.saved_model.save import save
import tensorflow_hub as hub
import tensorflowjs as tfjs
# Directory containing this script.
curr_dir = os.path.dirname(os.path.realpath(__file__))
# Scratch directory where the converted model and metadata files are written.
_tmp_dir = os.path.join(curr_dir, 'metadata')
def _create_model_with_metadata():
  """Creates a saved model and converts it to tfjs format with metadata.

  Writes two metadata JSON files plus the converted model into _tmp_dir.
  """
  # Generate model, inputs, and outputs using Tensorflow.
  tmp_saved_model_dir = tempfile.mkdtemp()
  # Only the saved model on disk is needed here; the original bound the
  # returned model-info dict to an unused variable.
  _create_saved_model(tmp_saved_model_dir)

  metadata1 = {'a': 1}
  metadata2 = {'label1': 0, 'label2': 1}
  metadata1_path = os.path.join(_tmp_dir, 'metadata1.json')
  metadata2_path = os.path.join(_tmp_dir, 'metadata2.json')
  with open(metadata1_path, 'w') as f:
    f.write(json.dumps(metadata1))
  with open(metadata2_path, 'w') as f:
    f.write(json.dumps(metadata2))
  # --metadata takes name:path pairs separated by commas.
  metadata_option = 'metadata1:'+metadata1_path+','+'metadata2:'+metadata2_path

  # Convert and store model to file.
  args = [
      'tensorflowjs_converter',
      '--input_format', 'tf_saved_model',
      '--output_format', 'tfjs_graph_model',
      '--signature_name', 'serving_default',
      '--saved_model_tags', 'serve',
      '--metadata', metadata_option]
  print(args, tmp_saved_model_dir, _tmp_dir)
  subprocess.check_output(args + [tmp_saved_model_dir, _tmp_dir])
def _create_saved_model(save_dir):
  """Saves a trivial TF model computing f(x) = v1 * v2 * x to save_dir.

  Returns a descriptor of the expected inputs/outputs (with v1=3, v2=2 the
  output at x=1 is 6), used by the JS side of the integration test.
  """
  input_data = constant_op.constant(1., shape=[1])
  root = tracking.AutoTrackable()
  root.v1 = variables.Variable(3.)
  root.v2 = variables.Variable(2.)
  root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
  # Trace the concrete function for the [1]-shaped float input above.
  to_save = root.f.get_concrete_function(input_data)

  save(root, save_dir, to_save)
  return {
      "async": False,
      "inputs": {
          "x": {"value": [1], "shape": [1], "dtype": 'float32'}},
      "outputs": {
          "Identity:0": {"value": [6], "shape": [1], "dtype": "float32"}}}
def main():
  """Regenerates the metadata test fixtures from a clean directory."""
  # Start from an empty output directory (isdir implies existence).
  if os.path.isdir(_tmp_dir):
    shutil.rmtree(_tmp_dir)
  os.mkdir(_tmp_dir)
  _create_model_with_metadata()


if __name__ == '__main__':
  main()
| tensorflow/tfjs | e2e/integration_tests/metadata.py | Python | apache-2.0 | 3,552 |
"""Test config flow."""
from unittest.mock import Mock, patch
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.setup import async_setup_component
from tests.common import mock_coro
async def test_hassio_discovery_startup(hass, aioclient_mock, hassio_client):
    """Test startup and discovery after event."""
    # Supervisor reports one pending MQTT discovery message.
    aioclient_mock.get(
        "http://127.0.0.1/discovery",
        json={
            "result": "ok",
            "data": {
                "discovery": [
                    {
                        "service": "mqtt",
                        "uuid": "test",
                        "addon": "mosquitto",
                        "config": {
                            "broker": "mock-broker",
                            "port": 1883,
                            "username": "mock-user",
                            "password": "mock-pass",
                            "protocol": "3.1.1",
                        },
                    }
                ]
            },
        },
    )
    # Add-on info lookup resolves the human-readable add-on name.
    aioclient_mock.get(
        "http://127.0.0.1/addons/mosquitto/info",
        json={"result": "ok", "data": {"name": "Mosquitto Test"}},
    )

    # Nothing should be fetched before Home Assistant starts.
    assert aioclient_mock.call_count == 0

    with patch(
        "homeassistant.components.mqtt." "config_flow.FlowHandler.async_step_hassio",
        Mock(return_value=mock_coro({"type": "abort"})),
    ) as mock_mqtt:
        # Discovery is polled once the start event fires.
        hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
        await hass.async_block_till_done()

        # One call for /discovery, one for the add-on info.
        assert aioclient_mock.call_count == 2
        assert mock_mqtt.called
        # The config flow receives the discovery config plus the add-on name.
        mock_mqtt.assert_called_with(
            {
                "broker": "mock-broker",
                "port": 1883,
                "username": "mock-user",
                "password": "mock-pass",
                "protocol": "3.1.1",
                "addon": "Mosquitto Test",
            }
        )
async def test_hassio_discovery_startup_done(hass, aioclient_mock, hassio_client):
    """Test startup and discovery with hass discovery."""
    # Supervisor reports one pending MQTT discovery message.
    aioclient_mock.get(
        "http://127.0.0.1/discovery",
        json={
            "result": "ok",
            "data": {
                "discovery": [
                    {
                        "service": "mqtt",
                        "uuid": "test",
                        "addon": "mosquitto",
                        "config": {
                            "broker": "mock-broker",
                            "port": 1883,
                            "username": "mock-user",
                            "password": "mock-pass",
                            "protocol": "3.1.1",
                        },
                    }
                ]
            },
        },
    )
    aioclient_mock.get(
        "http://127.0.0.1/addons/mosquitto/info",
        json={"result": "ok", "data": {"name": "Mosquitto Test"}},
    )

    # Here hassio is set up after hass has already started; discovery must
    # still run. get_homeassistant_info failing exercises the error branch.
    with patch(
        "homeassistant.components.hassio.HassIO.update_hass_api",
        Mock(return_value=mock_coro({"result": "ok"})),
    ), patch(
        "homeassistant.components.hassio.HassIO." "get_homeassistant_info",
        Mock(side_effect=HassioAPIError()),
    ), patch(
        "homeassistant.components.mqtt." "config_flow.FlowHandler.async_step_hassio",
        Mock(return_value=mock_coro({"type": "abort"})),
    ) as mock_mqtt:
        await hass.async_start()
        await async_setup_component(hass, "hassio", {})
        await hass.async_block_till_done()

        # One call for /discovery, one for the add-on info.
        assert aioclient_mock.call_count == 2
        assert mock_mqtt.called
        mock_mqtt.assert_called_with(
            {
                "broker": "mock-broker",
                "port": 1883,
                "username": "mock-user",
                "password": "mock-pass",
                "protocol": "3.1.1",
                "addon": "Mosquitto Test",
            }
        )
async def test_hassio_discovery_webhook(hass, aioclient_mock, hassio_client):
    """Test discovery webhook."""
    # The push webhook only carries the uuid; the full discovery payload is
    # fetched from the Supervisor afterwards.
    aioclient_mock.get(
        "http://127.0.0.1/discovery/testuuid",
        json={
            "result": "ok",
            "data": {
                "service": "mqtt",
                "uuid": "test",
                "addon": "mosquitto",
                "config": {
                    "broker": "mock-broker",
                    "port": 1883,
                    "username": "mock-user",
                    "password": "mock-pass",
                    "protocol": "3.1.1",
                },
            },
        },
    )
    aioclient_mock.get(
        "http://127.0.0.1/addons/mosquitto/info",
        json={"result": "ok", "data": {"name": "Mosquitto Test"}},
    )

    with patch(
        "homeassistant.components.mqtt." "config_flow.FlowHandler.async_step_hassio",
        Mock(return_value=mock_coro({"type": "abort"})),
    ) as mock_mqtt:
        # Simulate the Supervisor pushing a discovery notification.
        resp = await hassio_client.post(
            "/api/hassio_push/discovery/testuuid",
            json={"addon": "mosquitto", "service": "mqtt", "uuid": "testuuid"},
        )
        await hass.async_block_till_done()

        assert resp.status == 200
        # One call for the discovery payload, one for the add-on info.
        assert aioclient_mock.call_count == 2
        assert mock_mqtt.called
        mock_mqtt.assert_called_with(
            {
                "broker": "mock-broker",
                "port": 1883,
                "username": "mock-user",
                "password": "mock-pass",
                "protocol": "3.1.1",
                "addon": "Mosquitto Test",
            }
        )
| leppa/home-assistant | tests/components/hassio/test_discovery.py | Python | apache-2.0 | 5,530 |
import sys
import numpy as np
class ChromStats(object):
    """Accumulates summary statistics (count, NaN count, min, max, sum)
    for values on one or more chromosomes."""

    def __init__(self):
        self.n = 0
        self.n_nan = 0
        self.sum = 0
        self.min = None
        self.max = None

    def mean(self):
        """Calculates mean of sites that are not nan
        on this chromsome"""
        n_valid = self.n - self.n_nan
        if n_valid == 0:
            return np.inf
        return self.sum / float(n_valid)

    def set_from_vals(self, vals):
        """Replaces these stats with those of the numpy array *vals*;
        NaNs are counted and excluded for float dtypes."""
        self.n = vals.size
        if str(vals.dtype).startswith('float'):
            nan_mask = np.isnan(vals)
            self.n_nan = np.sum(nan_mask)
            if self.n_nan < self.n:
                valid = vals[~nan_mask]
                self.min = np.min(valid)
                self.max = np.max(valid)
                self.sum = np.sum(valid)
        else:
            self.min = np.min(vals)
            self.max = np.max(vals)
            self.sum = np.sum(vals)

    def add(self, other):
        """Folds *other*'s stats into this object in place."""
        self.n += other.n
        self.n_nan += other.n_nan
        self.sum += other.sum
        if other.min is not None and (self.min is None or
                                      self.min > other.min):
            self.min = other.min
        if other.max is not None and (self.max is None or
                                      self.max < other.max):
            self.max = other.max

    def __str__(self):
        fields = (self.n, str(self.n_nan), str(self.min), str(self.max),
                  str(self.sum))
        return "n=%d n_nan=%s min=%s max=%s sum=%s" % fields
def calc_stats(h5f, chrom_list, verbose=False):
    """Calculates stats for each chromosome in provided list as well
    as combined stats.

    Chromosomes without a matching node in the HDF5 file are skipped with
    a warning on stderr.
    """
    combined = ChromStats()

    for chrom in chrom_list:
        chrom_stat = ChromStats()
        node_name = "/%s" % chrom.name

        if node_name in h5f:
            node = h5f.getNode(node_name)
            # Read the whole node into memory and summarize it.
            vals = node[:]
            chrom_stat.set_from_vals(vals)

            if verbose:
                sys.stderr.write("%s %s\n" % (str(chrom), str(chrom_stat)))
        else:
            # BUG FIX: this message was missing its trailing newline;
            # set_stats/get_stats terminate the same warning with "\n".
            sys.stderr.write("skipping chromosome %s because "
                             "not present in HDF5 file\n" % chrom.name)

        combined.add(chrom_stat)

    return combined
def set_stats(h5f, chrom_list, verbose=False):
    """Calculates stats for each chromosome and entire track and
    stores them as attributes on the chromosome nodes. The
    provided HDF5 file handle must have been opened in append mode"""
    combined = ChromStats()
    for chrom in chrom_list:
        node_name = "/%s" % chrom.name
        if node_name in h5f:
            chrom_stat = ChromStats()
            # NOTE(review): getNode is the legacy PyTables spelling
            # (get_node in PyTables 3.x) -- confirm the pinned version.
            node = h5f.getNode(node_name)
            chrom_stat.set_from_vals(node[:])
            # Persist the summary stats as node attributes so get_stats()
            # can retrieve them later without rescanning the data.
            node.attrs.n = chrom_stat.n
            node.attrs.n_nan = chrom_stat.n_nan
            node.attrs.min = chrom_stat.min
            node.attrs.max = chrom_stat.max
            node.attrs.sum = chrom_stat.sum
            node.flush()
            if verbose:
                sys.stderr.write("%s %s\n" % (str(chrom), str(chrom_stat)))
            combined.add(chrom_stat)
        else:
            sys.stderr.write("skipping chromosome %s because "
                             "not present in HDF5 file\n" % chrom.name)
    return combined
def get_stats(h5f, chrom_list, verbose=False):
    """Retrieves stats that are stored as attributes for the specified
    set of chromosomes.

    Raises:
      ValueError: if a chromosome node lacks the stat attributes written
        by set_stats().
    """
    combined = ChromStats()

    for chrom in chrom_list:
        node_name = "/%s" % chrom.name
        if node_name in h5f:
            # Fresh ChromStats per chromosome; the original reused one
            # mutable instance across iterations, which worked only because
            # every field is overwritten -- this matches calc_stats.
            chrom_stat = ChromStats()
            node = h5f.getNode(node_name)

            if 'n' not in node.attrs:
                # BUG FIX: the original formatted track.name here, but no
                # 'track' variable exists in this function, so the error
                # path itself died with a NameError.
                raise ValueError("Stat attributes are not set for "
                                 "chromosome %s" % chrom.name)

            chrom_stat.n = node.attrs.n
            chrom_stat.n_nan = node.attrs.n_nan
            chrom_stat.min = node.attrs.min
            chrom_stat.max = node.attrs.max
            chrom_stat.sum = node.attrs.sum

            if verbose:
                sys.stderr.write("%s %s\n" % (str(chrom), str(chrom_stat)))

            combined.add(chrom_stat)
        else:
            sys.stderr.write("skipping chromosome %s because "
                             "not present in HDF5 file\n" % chrom.name)

    return combined
| smozaffari/WASP | CHT/chromstat.py | Python | apache-2.0 | 4,398 |
"""
Routines to compute RMSD of all PROT_IND_ files
These routines were developed by:
Rodrigo Antonio Faccioli - rodrigo.faccioli@usp.br / rodrigo.faccioli@gmail.com
Leandro Oliveira Bortot - leandro.bortot@usp.br / leandro.obt@gmail.com
"""
import os
import sys
from collections import OrderedDict
# Reference (native) structure every decoy is compared against.
native = "1VII.pdb"
# Location of the GROMACS binaries.
path_gromacs ="/home/faccioli/Programs/gmx-4.6.5/no_mpi/bin/"
# g_rms command template; the @...@ placeholders are substituted per PDB file.
main_command = "echo C-alpha C-alpha | @PATH_GROMACS@./g_rms -f @PROT@ -s @NATIVE@ -o temporary_rmsd.xvg 2>/dev/null"
""" This function obtains all pdb files
in mypath
"""
def get_PROT_IND_files_pdb(mypath):
only_pdb_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
#if file.endswith(".pdb"):
if file.find("PROT_IND_") >=0:
f_path = os.path.join(root,file)
only_pdb_file.append(f_path)
return only_pdb_file
def main():
    """Computes the C-alpha RMSD of every PROT_IND_ pdb against the native
    structure and writes a tab-separated summary, sorted by ascending RMSD,
    to all_rmsd.txt."""
    pdb_path = sys.argv[1]
    dict_rmsd = {}
    all_pdbs = get_PROT_IND_files_pdb(pdb_path)
    for pdb in all_pdbs:
        aux_command = (main_command
                       .replace("@PATH_GROMACS@", path_gromacs)
                       .replace("@PROT@", pdb)
                       .replace("@NATIVE@", native))
        os.system(aux_command)
        # Parse the g_rms xvg output, skipping '@'/'#' header lines. The
        # dict key is the same for every data row, so the last row (final
        # frame) wins -- NOTE(review): presumably intentional, confirm.
        with open("temporary_rmsd.xvg", "r") as temp_rmsd:
            for line in temp_rmsd:
                if line.find("@") < 0 and line.find("#") < 0:
                    rmsd_value = float(str(line).split()[1])
                    only_pdb_file_name = os.path.basename(pdb)
                    dict_rmsd[only_pdb_file_name] = rmsd_value
        os.remove("temporary_rmsd.xvg")
    # Saving dictionary ordered by ascending RMSD value.
    d_sorted_by_value = OrderedDict(sorted(dict_rmsd.items(),
                                           key=lambda x: x[1]))
    with open("all_rmsd.txt", "w") as rmsd_final:
        for key, value in d_sorted_by_value.items():
            rmsd_final.write(str(key) + "\t" + str(value) + "\n")


if __name__ == "__main__":
    # BUG FIX: main() was previously invoked unconditionally at import
    # time; the guard keeps the module importable without side effects.
    main()
#!/usr/bin/env python
import argparse
import os
import logging
import cdec.configobj
import cdec.sa
from cdec.sa._sa import monitor_cpu
import sys
# Maximum phrase length used when collecting collocation statistics.
MAX_PHRASE_LENGTH = 4

def precompute(f_sa, max_len, max_nt, max_size, min_gap, rank1, rank2, tight_phrases):
    """Precompute collocations of frequent patterns in the source suffix
    array and return the resulting cdec.sa.Precomputation object.

    NOTE(review): tight_phrases is accepted but never forwarded to
    Precomputation here -- confirm whether that is intentional.
    """
    lcp = cdec.sa.LCP(f_sa)
    # Most frequent patterns first.
    stats = sorted(lcp.compute_stats(MAX_PHRASE_LENGTH), reverse=True)
    precomp = cdec.sa.Precomputation(from_stats=stats,
                                     fsarray=f_sa,
                                     precompute_rank=rank1,
                                     precompute_secondary_rank=rank2,
                                     max_length=max_len,
                                     max_nonterminals=max_nt,
                                     train_max_initial_size=max_size,
                                     train_min_gap_size=min_gap)
    return precomp
def main():
    """Command line driver: compiles a parallel corpus, word alignment and
    bilexical tables into the binary files used by the cdec suffix-array
    grammar extractor, then writes a config pointing at the outputs."""
    preprocess_start_time = monitor_cpu()
    sys.setrecursionlimit(sys.getrecursionlimit() * 100)
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('cdec.sa.compile')
    parser = argparse.ArgumentParser(description='Compile a corpus into a suffix array.')
    parser.add_argument('--maxnt', '-n', type=int, default=2,
                        help='Maximum number of non-terminal symbols')
    parser.add_argument('--maxlen', '-l', type=int, default=5,
                        help='Maximum number of terminals')
    parser.add_argument('--maxsize', '-s', type=int, default=15,
                        help='Maximum rule span')
    parser.add_argument('--mingap', '-g', type=int, default=1,
                        help='Minimum gap size')
    parser.add_argument('--rank1', '-r1', type=int, default=100,
                        help='Number of pre-computed frequent patterns')
    parser.add_argument('--rank2', '-r2', type=int, default=10,
                        help='Number of pre-computed super-frequent patterns')
    parser.add_argument('--loose', action='store_true',
                        help='Enable loose phrase extraction (default: tight)')
    parser.add_argument('-c', '--config', default='/dev/stdout',
                        help='Output configuration')
    parser.add_argument('-f', '--source',
                        help='Source language corpus')
    parser.add_argument('-e', '--target',
                        help='Target language corpus')
    parser.add_argument('-b', '--bitext',
                        help='Parallel text (source ||| target)')
    parser.add_argument('-a', '--alignment', required=True,
                        help='Bitext word alignment')
    parser.add_argument('-o', '--output', required=True,
                        help='Output path')
    args = parser.parse_args()

    # The parallel corpus may be given as two files or one bitext file.
    if not ((args.source and args.target) or args.bitext):
        parser.error('a parallel corpus is required\n'
                     '\tuse -f (source) with -e (target) or -b (bitext)')

    param_names = ('max_len', 'max_nt', 'max_size', 'min_gap',
                   'rank1', 'rank2', 'tight_phrases')
    params = (args.maxlen, args.maxnt, args.maxsize, args.mingap,
              args.rank1, args.rank2, not args.loose)

    if not os.path.exists(args.output):
        os.mkdir(args.output)

    # Binary output paths; the precomputation name encodes its parameters.
    f_sa_bin = os.path.join(args.output, 'f.sa.bin')
    e_bin = os.path.join(args.output, 'e.bin')
    precomp_file = 'precomp.{0}.{1}.{2}.{3}.{4}.{5}.bin'.format(*params)
    precomp_bin = os.path.join(args.output, precomp_file)
    a_bin = os.path.join(args.output, 'a.bin')
    lex_bin = os.path.join(args.output, 'lex.bin')

    start_time = monitor_cpu()
    logger.info('Compiling source suffix array')
    if args.bitext:
        f_sa = cdec.sa.SuffixArray(from_text=args.bitext, side='source')
    else:
        f_sa = cdec.sa.SuffixArray(from_text=args.source)
    f_sa.write_binary(f_sa_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling source suffix array took %f seconds', stop_time - start_time)

    start_time = monitor_cpu()
    logger.info('Compiling target data array')
    if args.bitext:
        e = cdec.sa.DataArray(from_text=args.bitext, side='target')
    else:
        e = cdec.sa.DataArray(from_text=args.target)
    e.write_binary(e_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling target data array took %f seconds', stop_time - start_time)

    start_time = monitor_cpu()
    logger.info('Precomputing frequent phrases')
    precompute(f_sa, *params).write_binary(precomp_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling precomputations took %f seconds', stop_time - start_time)

    start_time = monitor_cpu()
    logger.info('Compiling alignment')
    a = cdec.sa.Alignment(from_text=args.alignment)
    a.write_binary(a_bin)
    stop_time = monitor_cpu()
    # BUG FIX: this log message previously read "seonds".
    logger.info('Compiling alignment took %f seconds', stop_time - start_time)

    start_time = monitor_cpu()
    logger.info('Compiling bilexical dictionary')
    lex = cdec.sa.BiLex(from_data=True, alignment=a, earray=e, fsarray=f_sa)
    lex.write_binary(lex_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling bilexical dictionary took %f seconds', stop_time - start_time)

    # Write configuration describing everything that was produced.
    config = cdec.configobj.ConfigObj(args.config, unrepr=True)
    config['f_sa_file'] = os.path.abspath(f_sa_bin)
    config['e_file'] = os.path.abspath(e_bin)
    config['a_file'] = os.path.abspath(a_bin)
    config['lex_file'] = os.path.abspath(lex_bin)
    config['precompute_file'] = os.path.abspath(precomp_bin)
    for name, value in zip(param_names, params):
        config[name] = value
    config.write()
    preprocess_stop_time = monitor_cpu()
    logger.info('Overall preprocessing step took %f seconds', preprocess_stop_time - preprocess_start_time)


if __name__ == '__main__':
    main()
| kho/mr-cdec | python/pkg/cdec/sa/compile.py | Python | apache-2.0 | 5,575 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Hate Crime Table 1."""
import os
import sys
import unittest
import tempfile
import json
import pandas as pd
from . import preprocess
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_SCRIPT_PATH, '..')) # for utils
import utils
# Column index at which the 'Year' column is inserted into the dataframe.
_YEAR_INDEX = 0
# Column order of the cleaned CSV produced by the test.
_OUTPUT_COLUMNS = ['Year', 'StatVar', 'Quantity']
class HateCrimeTable1Test(unittest.TestCase):
    """End-to-end test: xls fixture -> cleaned CSV -> golden-file compare."""

    def test_csv(self):
        csv_files = []
        # Mirrors the entries of the real import config for one year.
        test_config = {
            'type': 'xls',
            'path': 'testdata/2019.xls',
            'args': {
                'header': 3,
                'skipfooter': 3
            }
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            xls_file_path = os.path.join(_SCRIPT_PATH, test_config['path'])
            csv_file_path = os.path.join(tmp_dir, '2019.csv')
            # Convert the xls fixture into the intermediate per-year CSV.
            read_file = pd.read_excel(xls_file_path, **test_config['args'])
            read_file = preprocess._clean_dataframe(read_file)
            read_file.insert(_YEAR_INDEX, 'Year', '2019')
            read_file.to_csv(csv_file_path, index=None, header=True)
            csv_files.append(csv_file_path)

            config_path = os.path.join(_SCRIPT_PATH, 'config.json')
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)

            # Produce the cleaned CSV and diff it against the golden file.
            cleaned_csv_path = os.path.join(tmp_dir, 'cleaned.csv')
            utils.create_csv_mcf(csv_files, cleaned_csv_path, config,
                                 _OUTPUT_COLUMNS, preprocess._write_output_csv)

            with open(cleaned_csv_path, 'r', encoding='utf-8') as f_result:
                test_result = f_result.read()

                expected_csv_path = os.path.join(_SCRIPT_PATH, 'testdata',
                                                 'expected.csv')
                with open(expected_csv_path, 'r',
                          encoding='utf-8') as f_expected:
                    expected_result = f_expected.read()

                self.assertEqual(test_result, expected_result)
| datacommonsorg/data | scripts/fbi/hate_crime/table1/preprocess_test.py | Python | apache-2.0 | 2,606 |
# print absolute value of an integer:
a = -100
# The parenthesized form behaves identically under Python 2 (a print
# statement of a parenthesized expression) and Python 3 (a function call),
# whereas the original bare `print a` is a SyntaxError on Python 3.
if a >= 0:
    print(a)
else:
    print(-a)
| xingchaoma/github-python-study | byteofpython/ch01_intro/a.py | Python | apache-2.0 | 93 |
import datetime
import hashlib
import itertools
import logging
import os
import time
from collections import defaultdict
from dataclasses import asdict, dataclass, field
from operator import itemgetter
from typing import (
IO,
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import orjson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from django.utils.translation import override as override_language
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat
from analytics.models import RealmCount, StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
get_stream_cache_key,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_delivery_email_cache_key,
)
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import check_emoji_request, emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import (
InvitationError,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
ZephyrMessageAlreadySentException,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MessageRenderingResult, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.mention import MentionBackend, MentionData, silent_mention_syntax_for_user
from zerver.lib.message import (
MessageDict,
SendMessageRequest,
access_message,
bulk_access_messages,
get_last_message_id,
normalize_body,
render_markdown,
truncate_topic,
update_first_visible_message_id,
wildcard_mention_allowed,
)
from zerver.lib.notification_data import UserMessageNotificationsData, get_user_group_mentions_data
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_subscription import (
SubInfo,
bulk_get_private_peers,
bulk_get_subscriber_peer_info,
get_active_subscriptions_for_stream_id,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_subscribed_stream_ids_for_user,
get_subscriptions_for_send_message,
get_used_colors_for_user_ids,
get_user_ids_for_streams,
num_subscribers_for_stream_id,
subscriber_ids_with_stream_history_access,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_by_id,
access_stream_for_send_message,
can_access_stream_user_ids,
check_stream_access_based_on_stream_post_policy,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
get_stream_permission_policy_name,
get_web_public_streams_queryset,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.string_validation import check_stream_name, check_stream_topic
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.timezone import canonicalize_timezone
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
RESOLVED_TOPIC_PREFIX,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
messages_for_topic,
save_message_for_edit_use_case,
update_edit_history,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileDataElementValue, ProfileFieldData, UnspecifiedValue
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_mutes import add_user_mute, get_muting_users, get_user_mutes
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions, is_widget_message
from zerver.models import (
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
Draft,
EmailChangeStatus,
Message,
MultiuseInvite,
MutedUser,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
RealmPlayground,
RealmUserDefault,
Recipient,
ScheduledEmail,
ScheduledMessage,
ScheduledMessageNotificationEmail,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
UserTopic,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_fake_email_domain,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_realm,
get_realm_playgrounds,
get_stream,
get_stream_by_id_in_realm,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
linkifiers_for_realm,
query_for_ids,
realm_filters_for_realm,
validate_attachment_request,
)
from zerver.tornado.django_api import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
downgrade_now_without_creating_additional_invoices,
update_license_ledger_if_needed,
)
@dataclass
class SubscriptionInfo:
    """API "subscription" dicts grouped by the user's relationship to each
    stream: currently subscribed, formerly subscribed, and never subscribed."""

    subscriptions: List[Dict[str, Any]]
    unsubscribed: List[Dict[str, Any]]
    never_subscribed: List[Dict[str, Any]]
# These are hard to type-check because of the API_FIELDS loops.
RawStreamDict = Dict[str, Any]
RawSubscriptionDict = Dict[str, Any]
# How many recent public-stream messages a new user is backfilled with,
# and how many of the newest of those are left marked unread
# (see add_new_user_history).
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
# Only messages newer than this window are considered for backfill.
ONBOARDING_RECENT_TIMEDELTA = datetime.timedelta(weeks=1)
# Palette of stream colors assigned round-robin to new subscriptions.
STREAM_ASSIGNMENT_COLORS = [
    "#76ce90",
    "#fae589",
    "#a6c7e5",
    "#e79ab5",
    "#bfd56f",
    "#f4ae55",
    "#b0a5fd",
    "#addfe5",
    "#f5ce6e",
    "#c2726a",
    "#94c849",
    "#bd86e5",
    "#ee7e4a",
    "#a6dcbf",
    "#95a5fd",
    "#53a063",
    "#9987e1",
    "#e4523d",
    "#c2c2c2",
    "#4f8de4",
    "#c6a8ad",
    "#e7cc4d",
    "#c8bebf",
    "#a47462",
]
def subscriber_info(user_id: int) -> Dict[str, Any]:
    """Build the minimal per-subscriber payload used in subscription events."""
    return dict(id=user_id, flags=["read"])
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
    """Return the user ids that should receive events about this bot.

    A bot whose default sending or events-register stream is invite-only
    is visible only to its owner; otherwise the owner plus every human
    realm administrator is included.
    """
    sending = user_profile.default_sending_stream
    registering = user_profile.default_events_register_stream
    if (sending and sending.invite_only) or (registering and registering.invite_only):
        return {user_profile.bot_owner_id}
    recipients = {admin.id for admin in user_profile.realm.get_human_admin_users()}
    recipients.add(user_profile.bot_owner_id)
    return recipients
def realm_user_count(realm: Realm) -> int:
    """Count the realm's active human (non-bot) users."""
    active_humans = UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
    return active_humans.count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
    """Return active-human counts per role plus the active-bot total,
    keyed the way RealmAuditLog extra_data expects."""
    human_counts = {
        str(role): 0
        for role in (
            UserProfile.ROLE_REALM_ADMINISTRATOR,
            UserProfile.ROLE_REALM_OWNER,
            UserProfile.ROLE_MODERATOR,
            UserProfile.ROLE_MEMBER,
            UserProfile.ROLE_GUEST,
        )
    }
    role_rows = (
        UserProfile.objects.filter(realm=realm, is_bot=False, is_active=True)
        .values("role")
        .annotate(Count("role"))
    )
    for row in role_rows:
        human_counts[str(row["role"])] = row["role__count"]
    bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
    return {
        RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
        RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
    }
def get_signups_stream(realm: Realm) -> Stream:
    """Fetch the realm's "signups" stream.

    Thin wrapper around get_stream that exists to work around a lint rule.
    """
    stream_name = "signups"
    return get_stream(stream_name, realm)
def send_message_to_signup_notification_stream(
    sender: UserProfile, realm: Realm, message: str, topic_name: str = _("signups")
) -> None:
    """Post `message` to the realm's signup-notifications stream, if configured.

    The message is rendered in the realm's default language.
    """
    stream = realm.get_signup_notifications_stream()
    if stream is None:
        return
    with override_language(realm.default_language):
        internal_send_stream_message(sender, stream, topic_name, message)
def notify_new_user(user_profile: UserProfile) -> None:
    """Announce a signup in the realm's signup-notifications stream, and also
    in the "signups" stream of the Zulip administrative realm (if it exists)."""
    user_count = realm_user_count(user_profile.realm)
    sender_email = settings.NOTIFICATION_BOT
    sender = get_system_bot(sender_email, user_profile.realm_id)
    # The very first user's signup is not announced inside the realm itself.
    is_first_user = user_count == 1
    if not is_first_user:
        message = _("{user} just signed up for Zulip. (total: {user_count})").format(
            user=silent_mention_syntax_for_user(user_profile), user_count=user_count
        )
        if settings.BILLING_ENABLED:
            from corporate.lib.registration import generate_licenses_low_warning_message_if_required

            # Append a warning if the plan is running low on licenses.
            licenses_low_warning_message = generate_licenses_low_warning_message_if_required(
                user_profile.realm
            )
            if licenses_low_warning_message is not None:
                message += "\n"
                message += licenses_low_warning_message
        send_message_to_signup_notification_stream(sender, user_profile.realm, message)
    # We also send a notification to the Zulip administrative realm
    admin_realm = get_realm(settings.SYSTEM_BOT_REALM)
    admin_realm_sender = get_system_bot(sender_email, admin_realm.id)
    try:
        # Check whether the stream exists
        signups_stream = get_signups_stream(admin_realm)
        # We intentionally use the same strings as above to avoid translation burden.
        message = _("{user} just signed up for Zulip. (total: {user_count})").format(
            user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count
        )
        internal_send_stream_message(
            admin_realm_sender, signups_stream, user_profile.realm.display_subdomain, message
        )
    except Stream.DoesNotExist:
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it to send to it; just do nothing.
        pass
def notify_invites_changed(realm: Realm) -> None:
    """Tell the realm's admins (and admin bots) that invitation data changed."""
    admin_ids = [admin.id for admin in realm.get_admin_users_and_bots()]
    send_event(realm, dict(type="invites_changed"), admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
    """Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
    streams, so you have something to look at in your home view once
    you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
    are marked unread.
    """
    one_week_ago = timezone_now() - ONBOARDING_RECENT_TIMEDELTA
    # Invite-only streams are excluded from the backfill.
    recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
    recent_messages = Message.objects.filter(
        recipient_id__in=recipient_ids, date_sent__gt=one_week_ago
    ).order_by("-id")
    # message_ids_to_use ends up in ascending id order (oldest first).
    message_ids_to_use = list(
        reversed(recent_messages.values_list("id", flat=True)[0:ONBOARDING_TOTAL_MESSAGES])
    )
    if len(message_ids_to_use) == 0:
        return
    # Handle the race condition where a message arrives between
    # bulk_add_subscriptions above and the Message query just above
    already_ids = set(
        UserMessage.objects.filter(
            message_id__in=message_ids_to_use, user_profile=user_profile
        ).values_list("message_id", flat=True)
    )
    # Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
    # Iterating newest-first, the first ONBOARDING_UNREAD_MESSAGES rows are
    # left without the read flag; everything older is marked read.
    marked_unread = 0
    ums_to_create = []
    for message_id in reversed(message_ids_to_use):
        if message_id in already_ids:
            continue
        um = UserMessage(user_profile=user_profile, message_id=message_id)
        if marked_unread < ONBOARDING_UNREAD_MESSAGES:
            marked_unread += 1
        else:
            um.flags = UserMessage.flags.read
        ums_to_create.append(um)
    # reversed() restores ascending message-id order for insertion.
    UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
def process_new_human_user(
    user_profile: UserProfile,
    prereg_user: Optional[PreregistrationUser] = None,
    default_stream_groups: Sequence[DefaultStreamGroup] = [],
    realm_creation: bool = False,
) -> None:
    realm = user_profile.realm
    mit_beta_user = realm.is_zephyr_mirror_realm
    if prereg_user is not None:
        # An invited user: honor the streams chosen by the inviter.
        streams: List[Stream] = list(prereg_user.streams.all())
        acting_user: Optional[UserProfile] = prereg_user.referred_by
    else:
        streams = []
        acting_user = None
    # If the user's invitation didn't explicitly list some streams, we
    # add the default streams
    if len(streams) == 0:
        streams = get_default_subs(user_profile)
    # Also add every stream from any requested default stream groups.
    for default_stream_group in default_stream_groups:
        default_stream_group_streams = default_stream_group.streams.all()
        for stream in default_stream_group_streams:
            if stream not in streams:
                streams.append(stream)
    bulk_add_subscriptions(
        realm,
        streams,
        [user_profile],
        from_user_creation=True,
        acting_user=acting_user,
    )
    add_new_user_history(user_profile, streams)
    # mit_beta_users don't have a referred_by field
    if (
        not mit_beta_user
        and prereg_user is not None
        and prereg_user.referred_by is not None
        and prereg_user.referred_by.is_active
    ):
        # This is a cross-realm private message.
        with override_language(prereg_user.referred_by.default_language):
            internal_send_private_message(
                get_system_bot(settings.NOTIFICATION_BOT, prereg_user.referred_by.realm_id),
                prereg_user.referred_by,
                _("{user} accepted your invitation to join Zulip!").format(
                    user=f"{user_profile.full_name} <`{user_profile.email}`>"
                ),
            )
    revoke_preregistration_users(user_profile, prereg_user, realm_creation)
    if not realm_creation and prereg_user is not None and prereg_user.referred_by is not None:
        notify_invites_changed(user_profile.realm)
    notify_new_user(user_profile)
    # Clear any scheduled invitation emails to prevent them
    # from being sent after the user is created.
    clear_scheduled_invitation_emails(user_profile.delivery_email)
    if realm.send_welcome_emails:
        enqueue_welcome_emails(user_profile, realm_creation)
    # We have an import loop here; it's intentional, because we want
    # to keep all the onboarding code in zerver/lib/onboarding.py.
    from zerver.lib.onboarding import send_initial_pms

    send_initial_pms(user_profile)
def revoke_preregistration_users(
    created_user_profile: UserProfile,
    used_preregistration_user: Optional[PreregistrationUser],
    realm_creation: bool,
) -> None:
    """Mark the used PreregistrationUser as active and revoke duplicates.

    Any other PreregistrationUser for the same email in the realm is set
    to STATUS_REVOKED so analytics can identify which one was actually
    used.  Realm-creation PreregistrationUsers are never touched, since
    they must remain usable for creating other realms.
    """
    if used_preregistration_user is None:
        assert not realm_creation, "realm_creation should only happen with a PreregistrationUser"
    else:
        used_preregistration_user.status = confirmation_settings.STATUS_ACTIVE
        used_preregistration_user.save(update_fields=["status"])
    # In the special case of realm creation, there can be no additional
    # PreregistrationUser for us to want to modify.
    if realm_creation:
        return
    duplicates = PreregistrationUser.objects.filter(
        email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
    )
    if used_preregistration_user is not None:
        duplicates = duplicates.exclude(id=used_preregistration_user.id)
    duplicates.update(status=confirmation_settings.STATUS_REVOKED)
def notify_created_user(user_profile: UserProfile) -> None:
    """Broadcast a realm_user/add event for a newly created user to all
    active users in the realm."""
    user_row = user_profile_to_user_row(user_profile)
    person = format_user_row(
        user_profile.realm,
        user_profile,
        user_row,
        # Since we don't know what the client
        # supports at this point in the code, we
        # just assume client_gravatar and
        # user_avatar_url_field_optional = False :(
        client_gravatar=False,
        user_avatar_url_field_optional=False,
        # We assume there's no custom profile
        # field data for a new user; initial
        # values are expected to be added in a
        # later event.
        custom_profile_field_data={},
    )
    event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
    """Build the realm_bot/add event payload describing a (re)created bot."""

    def stream_name(stream: Optional[Stream]) -> Optional[str]:
        return stream.name if stream else None

    bot = dict(
        email=user_profile.email,
        user_id=user_profile.id,
        full_name=user_profile.full_name,
        bot_type=user_profile.bot_type,
        is_active=user_profile.is_active,
        api_key=get_api_key(user_profile),
        default_sending_stream=stream_name(user_profile.default_sending_stream),
        default_events_register_stream=stream_name(user_profile.default_events_register_stream),
        default_all_public_streams=user_profile.default_all_public_streams,
        avatar_url=avatar_url(user_profile),
        services=get_service_dicts_for_bot(user_profile.id),
    )
    # Set the owner key only when the bot has an owner.
    # The default bots don't have an owner. So don't
    # set the owner key while reactivating them.
    if user_profile.bot_owner is not None:
        bot["owner_id"] = user_profile.bot_owner.id
    return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
    """Send the realm_bot/add event to everyone who can see this bot."""
    recipients = bot_owner_user_ids(user_profile)
    send_event(user_profile.realm, created_bot_event(user_profile), recipients)
def create_users(
    realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int] = None
) -> None:
    """Bulk-create active users (or bots) from (full_name, email) pairs."""
    user_set = {(email, full_name, True) for full_name, email in name_list}
    bulk_create_users(realm, user_set, bot_type)
def do_create_user(
    email: str,
    password: Optional[str],
    realm: Realm,
    full_name: str,
    bot_type: Optional[int] = None,
    role: Optional[int] = None,
    bot_owner: Optional[UserProfile] = None,
    tos_version: Optional[str] = None,
    timezone: str = "",
    avatar_source: str = UserProfile.AVATAR_FROM_GRAVATAR,
    default_sending_stream: Optional[Stream] = None,
    default_events_register_stream: Optional[Stream] = None,
    default_all_public_streams: Optional[bool] = None,
    prereg_user: Optional[PreregistrationUser] = None,
    default_stream_groups: Sequence[DefaultStreamGroup] = [],
    source_profile: Optional[UserProfile] = None,
    realm_creation: bool = False,
    *,
    acting_user: Optional[UserProfile],
    enable_marketing_emails: bool = True,
) -> UserProfile:
    """Create a user (human or bot), write the audit-log/analytics records,
    and notify clients.  For humans, also run the full onboarding flow via
    process_new_human_user.  Returns the new UserProfile.
    """
    with transaction.atomic():
        user_profile = create_user(
            email=email,
            password=password,
            realm=realm,
            full_name=full_name,
            role=role,
            bot_type=bot_type,
            bot_owner=bot_owner,
            tos_version=tos_version,
            timezone=timezone,
            avatar_source=avatar_source,
            default_sending_stream=default_sending_stream,
            default_events_register_stream=default_events_register_stream,
            default_all_public_streams=default_all_public_streams,
            source_profile=source_profile,
            enable_marketing_emails=enable_marketing_emails,
        )
        event_time = user_profile.date_joined
        # Self-signups are logged as acted on by the new user themselves.
        if not acting_user:
            acting_user = user_profile
        RealmAuditLog.objects.create(
            realm=user_profile.realm,
            acting_user=acting_user,
            modified_user=user_profile,
            event_type=RealmAuditLog.USER_CREATED,
            event_time=event_time,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
                }
            ).decode(),
        )
        if realm_creation:
            # If this user just created a realm, make sure they are
            # properly tagged as the creator of the realm.
            realm_creation_audit_log = (
                RealmAuditLog.objects.filter(event_type=RealmAuditLog.REALM_CREATED, realm=realm)
                .order_by("id")
                .last()
            )
            assert realm_creation_audit_log is not None
            realm_creation_audit_log.acting_user = user_profile
            realm_creation_audit_log.save(update_fields=["acting_user"])
        do_increment_logging_stat(
            user_profile.realm,
            COUNT_STATS["active_users_log:is_bot:day"],
            user_profile.is_bot,
            event_time,
        )
        if settings.BILLING_ENABLED:
            update_license_ledger_if_needed(user_profile.realm, event_time)
    # Note that for bots, the caller will send an additional event
    # with bot-specific info like services.
    notify_created_user(user_profile)
    if bot_type is None:
        process_new_human_user(
            user_profile,
            prereg_user=prereg_user,
            default_stream_groups=default_stream_groups,
            realm_creation=realm_creation,
        )
    return user_profile
def do_activate_mirror_dummy_user(
    user_profile: UserProfile, *, acting_user: Optional[UserProfile]
) -> None:
    """Called to have a user "take over" a "mirror dummy" user
    (i.e. is_mirror_dummy=True) account when they sign up with the
    same email address.
    Essentially, the result should be as though we had created the
    UserProfile just now with do_create_user, except that the mirror
    dummy user may appear as the recipient or sender of messages from
    before their account was fully created.
    TODO: This function likely has bugs resulting from this being a
    parallel code path to do_create_user; e.g. it likely does not
    handle preferences or default streams properly.
    """
    with transaction.atomic():
        # Convert the dummy into a real, active account.
        change_user_is_active(user_profile, True)
        user_profile.is_mirror_dummy = False
        user_profile.set_unusable_password()
        user_profile.date_joined = timezone_now()
        user_profile.tos_version = settings.TERMS_OF_SERVICE_VERSION
        user_profile.save(
            update_fields=["date_joined", "password", "is_mirror_dummy", "tos_version"]
        )
        event_time = user_profile.date_joined
        RealmAuditLog.objects.create(
            realm=user_profile.realm,
            modified_user=user_profile,
            acting_user=acting_user,
            event_type=RealmAuditLog.USER_ACTIVATED,
            event_time=event_time,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
                }
            ).decode(),
        )
        do_increment_logging_stat(
            user_profile.realm,
            COUNT_STATS["active_users_log:is_bot:day"],
            user_profile.is_bot,
            event_time,
        )
        if settings.BILLING_ENABLED:
            update_license_ledger_if_needed(user_profile.realm, event_time)
    notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
    """Reactivate a user that had previously been deactivated"""
    with transaction.atomic():
        change_user_is_active(user_profile, True)
        event_time = timezone_now()
        RealmAuditLog.objects.create(
            realm=user_profile.realm,
            modified_user=user_profile,
            acting_user=acting_user,
            event_type=RealmAuditLog.USER_REACTIVATED,
            event_time=event_time,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
                }
            ).decode(),
        )
        do_increment_logging_stat(
            user_profile.realm,
            COUNT_STATS["active_users_log:is_bot:day"],
            user_profile.is_bot,
            event_time,
        )
        if settings.BILLING_ENABLED:
            update_license_ledger_if_needed(user_profile.realm, event_time)
    # Announce the user (and bot, if applicable) as newly visible.
    notify_created_user(user_profile)
    if user_profile.is_bot:
        notify_created_bot(user_profile)
    # The user's subscriptions were preserved while deactivated; tell the
    # peers of each still-existing subscribed stream that this user is back.
    subscribed_recipient_ids = Subscription.objects.filter(
        user_profile_id=user_profile.id, active=True, recipient__type=Recipient.STREAM
    ).values_list("recipient__type_id", flat=True)
    subscribed_streams = Stream.objects.filter(id__in=subscribed_recipient_ids, deactivated=False)
    subscriber_peer_info = bulk_get_subscriber_peer_info(
        realm=user_profile.realm,
        streams=subscribed_streams,
    )
    altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
    for stream in subscribed_streams:
        altered_user_dict[stream.id] = {user_profile.id}
    stream_dict = {stream.id: stream for stream in subscribed_streams}
    send_peer_subscriber_events(
        op="peer_add",
        realm=user_profile.realm,
        altered_user_dict=altered_user_dict,
        stream_dict=stream_dict,
        private_peer_dict=subscriber_peer_info.private_peer_dict,
    )
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
    """QuerySet of the realm's active, non-bot users."""
    humans = UserProfile.objects.filter(realm=realm, is_bot=False)
    return humans.filter(is_active=True)
@transaction.atomic(savepoint=False)
def do_set_realm_property(
    realm: Realm, name: str, value: Any, *, acting_user: Optional[UserProfile]
) -> None:
    """Takes in a realm object, the name of an attribute to update, the
    value to update and the user who initiated the update.

    Saves the new value, audit-logs the change, and notifies active users.
    For email_address_visibility changes crossing the EVERYONE boundary,
    also recomputes every human user's .email field.
    """
    property_type = Realm.property_types[name]
    assert isinstance(
        value, property_type
    ), f"Cannot update {name}: {value} is not an instance of {property_type}"
    old_value = getattr(realm, name)
    setattr(realm, name, value)
    realm.save(update_fields=[name])
    event = dict(
        type="realm",
        op="update",
        property=name,
        value=value,
    )
    transaction.on_commit(lambda: send_event(realm, event, active_user_ids(realm.id)))
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm,
        event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
        event_time=event_time,
        acting_user=acting_user,
        extra_data=orjson.dumps(
            {
                RealmAuditLog.OLD_VALUE: old_value,
                RealmAuditLog.NEW_VALUE: value,
                "property": name,
            }
        ).decode(),
    )
    if name == "email_address_visibility":
        if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so
            # changes between values that will not require changing
            # that field, so we can save work and return here.
            return
        user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
        for user_profile in user_profiles:
            user_profile.email = get_display_email_address(user_profile)
        UserProfile.objects.bulk_update(user_profiles, ["email"])
        for user_profile in user_profiles:
            # Bind user_profile as a default argument: a plain closure
            # late-binds the loop variable, so every deferred on_commit
            # callback would otherwise flush only the *last* profile.
            transaction.on_commit(
                lambda user_profile=user_profile: flush_user_profile(
                    sender=UserProfile, instance=user_profile
                )
            )
            # TODO: Design a bulk event for this or force-reload all clients
            send_user_email_update_event(user_profile)
def do_set_realm_authentication_methods(
    realm: Realm, authentication_methods: Dict[str, bool], *, acting_user: Optional[UserProfile]
) -> None:
    """Enable/disable realm authentication backends per the given mapping,
    audit-log the change, and notify active users."""
    old_value = realm.authentication_methods_dict()
    with transaction.atomic():
        for key, value in list(authentication_methods.items()):
            # authentication_methods is a bitfield; flip each named bit.
            index = getattr(realm.authentication_methods, key).number
            realm.authentication_methods.set_bit(index, int(value))
        realm.save(update_fields=["authentication_methods"])
        updated_value = realm.authentication_methods_dict()
        RealmAuditLog.objects.create(
            realm=realm,
            event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
            event_time=timezone_now(),
            acting_user=acting_user,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: old_value,
                    RealmAuditLog.NEW_VALUE: updated_value,
                    "property": "authentication_methods",
                }
            ).decode(),
        )
    event = dict(
        type="realm",
        op="update_dict",
        property="default",
        data=dict(authentication_methods=updated_value),
    )
    send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_editing(
    realm: Realm,
    allow_message_editing: bool,
    message_content_edit_limit_seconds: int,
    edit_topic_policy: int,
    *,
    acting_user: Optional[UserProfile],
) -> None:
    """Update the realm's three message-editing settings together,
    audit-logging only the settings whose value actually changed, and
    notify active users with a single update_dict event."""
    old_values = dict(
        allow_message_editing=realm.allow_message_editing,
        message_content_edit_limit_seconds=realm.message_content_edit_limit_seconds,
        edit_topic_policy=realm.edit_topic_policy,
    )
    realm.allow_message_editing = allow_message_editing
    realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
    realm.edit_topic_policy = edit_topic_policy
    event_time = timezone_now()
    updated_properties = dict(
        allow_message_editing=allow_message_editing,
        message_content_edit_limit_seconds=message_content_edit_limit_seconds,
        edit_topic_policy=edit_topic_policy,
    )
    with transaction.atomic():
        for updated_property, updated_value in updated_properties.items():
            # Skip audit entries for settings that did not change.
            if updated_value == old_values[updated_property]:
                continue
            RealmAuditLog.objects.create(
                realm=realm,
                event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
                event_time=event_time,
                acting_user=acting_user,
                extra_data=orjson.dumps(
                    {
                        RealmAuditLog.OLD_VALUE: old_values[updated_property],
                        RealmAuditLog.NEW_VALUE: updated_value,
                        "property": updated_property,
                    }
                ).decode(),
            )
        realm.save(update_fields=list(updated_properties.keys()))
    event = dict(
        type="realm",
        op="update_dict",
        property="default",
        data=updated_properties,
    )
    send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(
    realm: Realm, stream: Optional[Stream], stream_id: int, *, acting_user: Optional[UserProfile]
) -> None:
    """Set the realm's notifications stream, audit-log the change, and
    notify active users."""
    previous_stream_id = realm.notifications_stream_id
    realm.notifications_stream = stream
    with transaction.atomic():
        realm.save(update_fields=["notifications_stream"])
        RealmAuditLog.objects.create(
            realm=realm,
            event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
            event_time=timezone_now(),
            acting_user=acting_user,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: previous_stream_id,
                    RealmAuditLog.NEW_VALUE: stream_id,
                    "property": "notifications_stream",
                }
            ).decode(),
        )
    send_event(
        realm,
        dict(
            type="realm",
            op="update",
            property="notifications_stream_id",
            value=stream_id,
        ),
        active_user_ids(realm.id),
    )
def do_set_realm_signup_notifications_stream(
    realm: Realm, stream: Optional[Stream], stream_id: int, *, acting_user: Optional[UserProfile]
) -> None:
    """Set the realm's signup-notifications stream, audit-log the change,
    and notify active users."""
    previous_stream_id = realm.signup_notifications_stream_id
    realm.signup_notifications_stream = stream
    with transaction.atomic():
        realm.save(update_fields=["signup_notifications_stream"])
        RealmAuditLog.objects.create(
            realm=realm,
            event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
            event_time=timezone_now(),
            acting_user=acting_user,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: previous_stream_id,
                    RealmAuditLog.NEW_VALUE: stream_id,
                    "property": "signup_notifications_stream",
                }
            ).decode(),
        )
    send_event(
        realm,
        dict(
            type="realm",
            op="update",
            property="signup_notifications_stream_id",
            value=stream_id,
        ),
        active_user_ids(realm.id),
    )
def do_set_realm_user_default_setting(
    realm_user_default: RealmUserDefault,
    name: str,
    value: Any,
    *,
    acting_user: Optional[UserProfile],
) -> None:
    """Update one realm-level default user setting, audit-log the change,
    and broadcast it to the realm's active users."""
    previous_value = getattr(realm_user_default, name)
    realm = realm_user_default.realm
    event_time = timezone_now()
    with transaction.atomic(savepoint=False):
        setattr(realm_user_default, name, value)
        realm_user_default.save(update_fields=[name])
        RealmAuditLog.objects.create(
            realm=realm,
            event_type=RealmAuditLog.REALM_DEFAULT_USER_SETTINGS_CHANGED,
            event_time=event_time,
            acting_user=acting_user,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: previous_value,
                    RealmAuditLog.NEW_VALUE: value,
                    "property": name,
                }
            ).decode(),
        )
    send_event(
        realm,
        dict(
            type="realm_user_settings_defaults",
            op="update",
            property=name,
            value=value,
        ),
        active_user_ids(realm.id),
    )
def do_deactivate_realm(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
    """
    Deactivate this realm. Do NOT deactivate the users -- we need to be able to
    tell the difference between users that were intentionally deactivated,
    e.g. by a realm admin, and users who can't currently use Zulip because their
    realm has been deactivated.

    Also records a REALM_DEACTIVATED audit-log entry (with a snapshot of
    the realm's user counts by role), cancels any scheduled emails for
    the realm, logs out every active human user, and flushes active
    longpoll connections with a realm/deactivated event.
    """
    # Idempotent: a second call on an already-deactivated realm is a no-op.
    if realm.deactivated:
        return
    realm.deactivated = True
    realm.save(update_fields=["deactivated"])
    if settings.BILLING_ENABLED:
        downgrade_now_without_creating_additional_invoices(realm)
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm,
        event_type=RealmAuditLog.REALM_DEACTIVATED,
        event_time=event_time,
        acting_user=acting_user,
        extra_data=orjson.dumps(
            {
                RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
            }
        ).decode(),
    )
    # No point in delivering emails scheduled before the deactivation.
    ScheduledEmail.objects.filter(realm=realm).delete()
    for user in active_humans_in_realm(realm):
        # Don't deactivate the users, but do delete their sessions so they get
        # bumped to the login screen, where they'll get a realm deactivation
        # notice when they try to log in.
        delete_user_sessions(user)
    # This event will only ever be received by clients with an active
    # longpoll connection, because by this point clients will be
    # unable to authenticate again to their event queue (triggering an
    # immediate reload into the page explaining the realm was
    # deactivated). So the purpose of sending this is to flush all
    # active longpoll connections for the realm.
    event = dict(type="realm", op="deactivated", realm_id=realm.id)
    send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
    """Reverse a realm deactivation, recording a REALM_REACTIVATED
    audit-log entry (with user counts by role) in the same transaction
    as the save."""
    realm.deactivated = False
    with transaction.atomic():
        realm.save(update_fields=["deactivated"])
        RealmAuditLog.objects.create(
            realm=realm,
            event_type=RealmAuditLog.REALM_REACTIVATED,
            event_time=timezone_now(),
            extra_data=orjson.dumps(
                {RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm)}
            ).decode(),
        )
def do_change_realm_subdomain(
    realm: Realm, new_subdomain: str, *, acting_user: Optional[UserProfile]
) -> None:
    """Changing a realm's subdomain is a highly disruptive operation,
    because all existing clients will need to be updated to point to
    the new URL. Further, requests to fetch data from existing event
    queues will fail with an authentication error when this change
    happens (because the old subdomain is no longer associated with
    the realm), making it hard for us to provide a graceful update
    experience for clients.

    Beyond renaming, this creates a deactivated placeholder realm on
    the old subdomain whose deactivated_redirect points at the new
    location, and re-points any existing placeholder realms that were
    redirecting to the old URI.
    """
    old_subdomain = realm.subdomain
    old_uri = realm.uri
    # If the realm had been a demo organization scheduled for
    # deletion, clear that state.
    realm.demo_organization_scheduled_deletion_date = None
    realm.string_id = new_subdomain
    with transaction.atomic():
        realm.save(update_fields=["string_id", "demo_organization_scheduled_deletion_date"])
        RealmAuditLog.objects.create(
            realm=realm,
            event_type=RealmAuditLog.REALM_SUBDOMAIN_CHANGED,
            event_time=timezone_now(),
            acting_user=acting_user,
            extra_data={"old_subdomain": old_subdomain, "new_subdomain": new_subdomain},
        )
        # If a realm is being renamed multiple times, we should find all the
        # placeholder realms and reset their deactivated_redirect field to
        # point to the new realm uri.
        placeholder_realms = Realm.objects.filter(deactivated_redirect=old_uri, deactivated=True)
        for placeholder_realm in placeholder_realms:
            do_add_deactivated_redirect(placeholder_realm, realm.uri)
    # The below block isn't executed in a transaction with the earlier code due to
    # the functions called below being complex and potentially sending events,
    # which we don't want to do in atomic blocks.
    # When we change a realm's subdomain the realm with old subdomain is basically
    # deactivated. We are creating a deactivated realm using old subdomain and setting
    # its deactivated redirect to new_subdomain so that we can tell the users that
    # the realm has been moved to a new subdomain.
    placeholder_realm = do_create_realm(old_subdomain, realm.name)
    do_deactivate_realm(placeholder_realm, acting_user=None)
    do_add_deactivated_redirect(placeholder_realm, realm.uri)
def do_add_deactivated_redirect(realm: Realm, redirect_url: str) -> None:
    """Record where a deactivated realm has moved, so visitors to the old
    subdomain can be pointed at *redirect_url*."""
    realm.deactivated_redirect = redirect_url
    realm.save(update_fields=["deactivated_redirect"])
def do_scrub_realm(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
    """Irreversibly scrub personal data from every user in *realm*.

    For each user this deletes their sent messages and avatar, and
    replaces their name and email addresses with random placeholders.
    It then removes the realm's custom profile fields and attachments,
    and records a REALM_SCRUBBED audit-log entry.
    """
    if settings.BILLING_ENABLED:
        downgrade_now_without_creating_additional_invoices(realm)
    for user in UserProfile.objects.filter(realm=realm):
        do_delete_messages_by_sender(user)
        do_delete_avatar_image(user, acting_user=acting_user)
        user.full_name = f"Scrubbed {generate_key()[:15]}"
        replacement_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
        user.email = replacement_email
        user.delivery_email = replacement_email
        user.save(update_fields=["full_name", "email", "delivery_email"])
    do_remove_realm_custom_profile_fields(realm)
    Attachment.objects.filter(realm=realm).delete()
    RealmAuditLog.objects.create(
        realm=realm,
        event_time=timezone_now(),
        acting_user=acting_user,
        event_type=RealmAuditLog.REALM_SCRUBBED,
    )
def do_delete_user(user_profile: UserProfile) -> None:
    """Hard-delete *user_profile*, replacing it with an inactive mirror-dummy
    placeholder user that reuses the same id and date_joined.

    The placeholder is re-subscribed to the huddles the original user was
    in, so that group-PM history involving those huddles keeps a valid
    participant row. The user's personal Recipient (and thus all private
    messages sent to them) is deleted explicitly.
    """
    if user_profile.realm.is_zephyr_mirror_realm:
        raise AssertionError("Deleting zephyr mirror users is not supported")
    do_deactivate_user(user_profile, acting_user=None)
    # Snapshot the huddle recipients before the delete cascades away the
    # user's Subscription rows.
    subscribed_huddle_recipient_ids = set(
        Subscription.objects.filter(
            user_profile=user_profile, recipient__type=Recipient.HUDDLE
        ).values_list("recipient_id", flat=True)
    )
    user_id = user_profile.id
    realm = user_profile.realm
    date_joined = user_profile.date_joined
    personal_recipient = user_profile.recipient
    with transaction.atomic():
        user_profile.delete()
        # Recipient objects don't get deleted through CASCADE, so we need to handle
        # the user's personal recipient manually. This will also delete all Messages pointing
        # to this recipient (all private messages sent to the user).
        assert personal_recipient is not None
        personal_recipient.delete()
        replacement_user = create_user(
            force_id=user_id,
            email=f"deleteduser{user_id}@{get_fake_email_domain(realm)}",
            password=None,
            realm=realm,
            full_name=f"Deleted User {user_id}",
            active=False,
            is_mirror_dummy=True,
            force_date_joined=date_joined,
        )
        subs_to_recreate = [
            Subscription(
                user_profile=replacement_user,
                recipient=recipient,
                is_user_active=replacement_user.is_active,
            )
            for recipient in Recipient.objects.filter(id__in=subscribed_huddle_recipient_ids)
        ]
        Subscription.objects.bulk_create(subs_to_recreate)
        RealmAuditLog.objects.create(
            realm=replacement_user.realm,
            modified_user=replacement_user,
            acting_user=None,
            event_type=RealmAuditLog.USER_DELETED,
            event_time=timezone_now(),
        )
def change_user_is_active(user_profile: UserProfile, value: bool) -> None:
    """
    Helper function for changing the .is_active field. Not meant as a standalone function
    in production code as properly activating/deactivating users requires more steps.
    This changes the is_active value and saves it, while ensuring
    Subscription.is_user_active values are updated in the same db transaction,
    so the two fields cannot get out of sync.
    """
    with transaction.atomic(savepoint=False):
        user_profile.is_active = value
        user_profile.save(update_fields=["is_active"])
        # Keep the denormalized Subscription.is_user_active copies in sync.
        Subscription.objects.filter(user_profile=user_profile).update(is_user_active=value)
def get_active_bots_owned_by_user(user_profile: UserProfile) -> QuerySet:
    """Return a queryset of the active bot users owned by *user_profile*."""
    return UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile)
def do_deactivate_user(
    user_profile: UserProfile, _cascade: bool = True, *, acting_user: Optional[UserProfile]
) -> None:
    """Deactivate *user_profile* (and, unless _cascade=False, the active
    bots they own first), delete their sessions, clear their scheduled
    emails, revoke their invitations, and record audit/analytics rows.

    Afterwards notifies clients with a realm_user/remove event (plus a
    realm_bot/remove event to bot owners when the target is a bot).
    Idempotent: a no-op if the user is already inactive.
    """
    if not user_profile.is_active:
        return

    if _cascade:
        # We need to deactivate bots before the target user, to ensure
        # that a failure partway through this function cannot result
        # in only the user being deactivated.
        bot_profiles = get_active_bots_owned_by_user(user_profile)
        for profile in bot_profiles:
            do_deactivate_user(profile, _cascade=False, acting_user=acting_user)

    with transaction.atomic():
        if user_profile.realm.is_zephyr_mirror_realm:  # nocoverage
            # For zephyr mirror users, we need to make them a mirror dummy
            # again; otherwise, other users won't get the correct behavior
            # when trying to send messages to this person inside Zulip.
            #
            # Ideally, we need to also ensure their zephyr mirroring bot
            # isn't running, but that's a separate issue.
            user_profile.is_mirror_dummy = True
            user_profile.save(update_fields=["is_mirror_dummy"])
        change_user_is_active(user_profile, False)

    delete_user_sessions(user_profile)
    clear_scheduled_emails(user_profile.id)
    revoke_invites_generated_by_user(user_profile)

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        modified_user=user_profile,
        acting_user=acting_user,
        event_type=RealmAuditLog.USER_DEACTIVATED,
        event_time=event_time,
        extra_data=orjson.dumps(
            {
                RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
            }
        ).decode(),
    )
    # Decrement the active-users analytics count.
    do_increment_logging_stat(
        user_profile.realm,
        COUNT_STATS["active_users_log:is_bot:day"],
        user_profile.is_bot,
        event_time,
        increment=-1,
    )
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)

    event = dict(
        type="realm_user",
        op="remove",
        person=dict(user_id=user_profile.id, full_name=user_profile.full_name),
    )
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))

    if user_profile.is_bot:
        event = dict(
            type="realm_bot",
            op="remove",
            bot=dict(user_id=user_profile.id, full_name=user_profile.full_name),
        )
        send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
@transaction.atomic(savepoint=False)
def do_deactivate_stream(
    stream: Stream, log: bool = True, *, acting_user: Optional[UserProfile]
) -> None:
    """Deactivate *stream*: deactivate all its subscriptions, make it
    invite-only, rename it out of the way so the name can be reused,
    remove it from default-stream configuration, and notify clients
    with a stream/delete event plus a STREAM_DEACTIVATED audit entry.

    The ``log`` parameter is unused; it is kept for call-site
    compatibility.
    """
    # We want to mark all messages in the to-be-deactivated stream as
    # read for all users; otherwise they will pollute queries like
    # "Get the user's first unread message". Since this can be an
    # expensive operation, we do it via the deferred_work queue
    # processor.
    deferred_work_event = {
        "type": "mark_stream_messages_as_read_for_everyone",
        "stream_recipient_id": stream.recipient_id,
    }
    transaction.on_commit(lambda: queue_json_publish("deferred_work", deferred_work_event))

    # Get the affected user ids *before* we deactivate everybody.
    affected_user_ids = can_access_stream_user_ids(stream)

    get_active_subscriptions_for_stream_id(stream.id, include_deactivated_users=True).update(
        active=False
    )

    was_invite_only = stream.invite_only
    stream.deactivated = True
    stream.invite_only = True

    # Preserve as much as possible the original stream name while giving it a
    # special prefix that both indicates that the stream is deactivated and
    # frees up the original name for reuse.
    old_name = stream.name

    # Prepend a short hash of the stream ID so that two deactivated
    # streams that had the same original name cannot collide.
    hashed_stream_id = hashlib.sha512(str(stream.id).encode()).hexdigest()[0:7]
    new_name = (hashed_stream_id + "!DEACTIVATED:" + old_name)[: Stream.MAX_NAME_LENGTH]
    stream.name = new_name
    stream.save(update_fields=["name", "deactivated", "invite_only"])

    # If this is a default stream, remove it, properly sending a
    # notification to browser clients.
    if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
        do_remove_default_stream(stream)

    default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
    for group in default_stream_groups_for_stream:
        do_remove_streams_from_default_stream_group(stream.realm, group, [stream])

    # Remove the old stream information from remote cache.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    cache_delete(old_cache_key)

    # Clients are told about the deletion under the stream's original
    # name and original privacy setting.
    stream_dict = stream.to_dict()
    stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
    event = dict(type="stream", op="delete", streams=[stream_dict])
    transaction.on_commit(lambda: send_event(stream.realm, event, affected_user_ids))

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=stream.realm,
        acting_user=acting_user,
        modified_stream=stream,
        event_type=RealmAuditLog.STREAM_DEACTIVATED,
        event_time=event_time,
    )
def send_user_email_update_event(user_profile: UserProfile) -> None:
    """After the current transaction commits, tell all active users in the
    realm about the user's new (publicly visible) email address."""
    event = dict(
        type="realm_user",
        op="update",
        person=dict(user_id=user_profile.id, new_email=user_profile.email),
    )
    transaction.on_commit(
        lambda: send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
    )
@transaction.atomic(savepoint=False)
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
    """Set the user's delivery (login) email to *new_email*.

    Updates the public .email too when the realm's email-visibility
    settings make it public, notifies the target user (and, where
    applicable, everyone) via events after commit, refreshes the
    Gravatar-derived avatar if relevant, and records an audit entry.
    """
    delete_user_profile_caches([user_profile])

    user_profile.delivery_email = new_email
    if user_profile.email_address_is_realm_public():
        user_profile.email = new_email
        user_profile.save(update_fields=["email", "delivery_email"])
    else:
        user_profile.save(update_fields=["delivery_email"])

    # We notify just the target user (and eventually org admins, only
    # when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
    # about their new delivery email, since that field is private.
    payload = dict(user_id=user_profile.id, delivery_email=new_email)
    event = dict(type="realm_user", op="update", person=payload)
    transaction.on_commit(lambda: send_event(user_profile.realm, event, [user_profile.id]))

    if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
        # If the user is using Gravatar to manage their email address,
        # their Gravatar just changed, and we need to notify other
        # clients.
        notify_avatar_url_change(user_profile)

    if user_profile.email_address_is_realm_public():
        # Additionally, if we're also changing the publicly visible
        # email, we send a new_email event as well.
        send_user_email_update_event(user_profile)

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_EMAIL_CHANGED,
        event_time=event_time,
    )
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
    """Begin the email-change confirmation flow for *user_profile*.

    Creates an EmailChangeStatus record for the old/new address pair and
    emails a confirmation link to the proposed new address.
    """
    old_email = user_profile.delivery_email
    change_status = EmailChangeStatus.objects.create(
        new_email=new_email,
        old_email=old_email,
        user_profile=user_profile,
        realm=user_profile.realm,
    )
    activation_url = create_confirmation_link(change_status, Confirmation.EMAIL_CHANGE)

    # Deferred import (presumably to avoid an import cycle at module load).
    from zerver.context_processors import common_context

    email_context = common_context(user_profile)
    email_context.update(
        old_email=old_email,
        new_email=new_email,
        activate_url=activation_url,
    )
    language = user_profile.default_language
    send_email(
        "zerver/emails/confirm_new_email",
        to_emails=[new_email],
        from_name=FromAddress.security_email_from_name(language=language),
        from_address=FromAddress.tokenized_no_reply_address(),
        language=language,
        context=email_context,
        realm=user_profile.realm,
    )
def compute_irc_user_fullname(email: str) -> str:
    """Derive a display name for an IRC mirror user from their email:
    the local part of the address plus an " (IRC)" suffix."""
    local_part, _, _ = email.partition("@")
    return f"{local_part} (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
    """Derive a display name for a Jabber mirror user from their email:
    the local part of the address plus an " (XMPP)" suffix."""
    local_part, _, _ = email.partition("@")
    return f"{local_part} (XMPP)"
@cache_with_key(
    lambda realm, email, f: user_profile_delivery_email_cache_key(email, realm),
    timeout=3600 * 24 * 7,
)
def create_mirror_user_if_needed(
    realm: Realm, email: str, email_to_fullname: Callable[[str], str]
) -> UserProfile:
    """Fetch the mirror user for *email* in *realm*, creating an inactive
    mirror-dummy user (named via *email_to_fullname*) if none exists.

    The result is cached for a week, keyed on (email, realm).
    """
    try:
        return get_user_by_delivery_email(email, realm)
    except UserProfile.DoesNotExist:
        try:
            # Forge a user for this person
            return create_user(
                email=email,
                password=None,
                realm=realm,
                full_name=email_to_fullname(email),
                active=False,
                is_mirror_dummy=True,
            )
        except IntegrityError:
            # Another process created this user concurrently; fetch the
            # row it created instead.
            return get_user_by_delivery_email(email, realm)
def render_incoming_message(
    message: Message,
    content: str,
    user_ids: Set[int],
    realm: Realm,
    mention_data: Optional[MentionData] = None,
    email_gateway: bool = False,
) -> MessageRenderingResult:
    """Render *content* as Zulip markdown for *message*, converting any
    rendering failure into a user-facing JsonableError."""
    realm_alert_words_automaton = get_alert_word_automaton(realm)
    try:
        return render_markdown(
            message=message,
            content=content,
            realm=realm,
            realm_alert_words_automaton=realm_alert_words_automaton,
            mention_data=mention_data,
            email_gateway=email_gateway,
        )
    except MarkdownRenderingException:
        raise JsonableError(_("Unable to render message"))
class RecipientInfoResult(TypedDict):
    """Result payload of get_recipient_info(): the user-id sets and bot
    metadata the message-send path needs for one recipient."""

    # Users who will receive the message.
    active_user_ids: Set[int]
    # Users with enable_online_push_notifications set.
    online_push_user_ids: Set[int]
    # Users who have *disabled* email / push notifications for PMs and
    # mentions (the disabled set is typically much smaller).
    pm_mention_email_disabled_user_ids: Set[int]
    pm_mention_push_disabled_user_ids: Set[int]
    # Subscribers who should get email / push notifications for this stream.
    stream_email_user_ids: Set[int]
    stream_push_user_ids: Set[int]
    # Users for whom a wildcard mention should notify like a personal mention.
    wildcard_mention_user_ids: Set[int]
    # Users who have muted the sender.
    muted_sender_user_ids: Set[int]
    # Users who should get UserMessage rows (service bots are excluded).
    um_eligible_user_ids: Set[int]
    long_term_idle_user_ids: Set[int]
    # Ordinary (DEFAULT_BOT) bots among the recipients/mentioned users.
    default_bot_user_ids: Set[int]
    # (user_id, bot_type) pairs for service bots.
    service_bot_tuples: List[Tuple[int, int]]
    # All bot user ids, used to avoid notifying bots.
    all_bot_user_ids: Set[int]
def get_recipient_info(
    *,
    realm_id: int,
    recipient: Recipient,
    sender_id: int,
    stream_topic: Optional[StreamTopicTarget],
    possibly_mentioned_user_ids: AbstractSet[int] = set(),  # never mutated, so a shared default is safe
    possible_wildcard_mention: bool = True,
) -> RecipientInfoResult:
    """Compute, for one message's recipient, the user-id sets the send
    path needs: who receives the message, who should be notified by
    push/email, which bots are involved, etc. (see RecipientInfoResult).

    *stream_topic* is required for stream recipients; for stream
    messages, per-subscription notification settings override the
    user-level defaults, and stream/topic mutes suppress notifications.
    """
    stream_push_user_ids: Set[int] = set()
    stream_email_user_ids: Set[int] = set()
    wildcard_mention_user_ids: Set[int] = set()
    muted_sender_user_ids: Set[int] = get_muting_users(sender_id)

    if recipient.type == Recipient.PERSONAL:
        # The sender and recipient may be the same id, so
        # de-duplicate using a set.
        message_to_user_ids = list({recipient.type_id, sender_id})
        assert len(message_to_user_ids) in [1, 2]
    elif recipient.type == Recipient.STREAM:
        # Anybody calling us w/r/t a stream message needs to supply
        # stream_topic. We may eventually want to have different versions
        # of this function for different message types.
        assert stream_topic is not None
        user_ids_muting_topic = stream_topic.user_ids_muting_topic()

        subscription_rows = (
            get_subscriptions_for_send_message(
                realm_id=realm_id,
                stream_id=stream_topic.stream_id,
                possible_wildcard_mention=possible_wildcard_mention,
                possibly_mentioned_user_ids=possibly_mentioned_user_ids,
            )
            .annotate(
                # Pull in the subscriber's user-level defaults alongside
                # the per-subscription overrides.
                user_profile_email_notifications=F(
                    "user_profile__enable_stream_email_notifications"
                ),
                user_profile_push_notifications=F("user_profile__enable_stream_push_notifications"),
                user_profile_wildcard_mentions_notify=F("user_profile__wildcard_mentions_notify"),
            )
            .values(
                "user_profile_id",
                "push_notifications",
                "email_notifications",
                "wildcard_mentions_notify",
                "user_profile_email_notifications",
                "user_profile_push_notifications",
                "user_profile_wildcard_mentions_notify",
                "is_muted",
            )
            .order_by("user_profile_id")
        )

        message_to_user_ids = [row["user_profile_id"] for row in subscription_rows]

        def should_send(setting: str, row: Dict[str, Any]) -> bool:
            # This implements the structure that the UserProfile stream notification settings
            # are defaults, which can be overridden by the stream-level settings (if those
            # values are not null).
            if row["is_muted"]:
                return False
            if row["user_profile_id"] in user_ids_muting_topic:
                return False
            if row[setting] is not None:
                return row[setting]
            return row["user_profile_" + setting]

        stream_push_user_ids = {
            row["user_profile_id"]
            for row in subscription_rows
            # Note: muting a stream overrides stream_push_notify
            if should_send("push_notifications", row)
        }

        stream_email_user_ids = {
            row["user_profile_id"]
            for row in subscription_rows
            # Note: muting a stream overrides stream_email_notify
            if should_send("email_notifications", row)
        }

        if possible_wildcard_mention:
            # If there's a possible wildcard mention, we need to
            # determine the set of users who have enabled the
            # "wildcard_mentions_notify" setting (that is, the set of
            # users for whom wildcard mentions should be treated like
            # personal mentions for notifications). This setting
            # applies to both email and push notifications.
            wildcard_mention_user_ids = {
                row["user_profile_id"]
                for row in subscription_rows
                if should_send("wildcard_mentions_notify", row)
            }
    elif recipient.type == Recipient.HUDDLE:
        message_to_user_ids = get_huddle_user_ids(recipient)
    else:
        raise ValueError("Bad recipient type")

    message_to_user_id_set = set(message_to_user_ids)

    user_ids = set(message_to_user_id_set)
    # Important note: Because we haven't rendered Markdown yet, we
    # don't yet know which of these possibly-mentioned users was
    # actually mentioned in the message (in other words, the
    # mention syntax might have been in a code block or otherwise
    # escaped). `get_ids_for` will filter these extra user rows
    # for our data structures not related to bots
    user_ids |= possibly_mentioned_user_ids

    if user_ids:
        query = UserProfile.objects.filter(is_active=True).values(
            "id",
            "enable_online_push_notifications",
            "enable_offline_email_notifications",
            "enable_offline_push_notifications",
            "is_bot",
            "bot_type",
            "long_term_idle",
        )

        # query_for_ids is fast highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages)
        query = query_for_ids(
            query=query,
            user_ids=sorted(user_ids),
            field="id",
        )
        rows = list(query)
    else:
        # TODO: We should always have at least one user_id as a recipient
        # of any message we send. Right now the exception to this
        # rule is `notify_new_user`, which, at least in a possibly
        # contrived test scenario, can attempt to send messages
        # to an inactive bot. When we plug that hole, we can avoid
        # this `else` clause and just `assert(user_ids)`.
        #
        # UPDATE: It's February 2020 (and a couple years after the above
        # comment was written). We have simplified notify_new_user
        # so that it should be a little easier to reason about.
        # There is currently some cleanup to how we handle cross
        # realm bots that is still under development. Once that
        # effort is complete, we should be able to address this
        # to-do.
        rows = []

    def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
        """Only includes users on the explicit message to line"""
        return {row["id"] for row in rows if f(row)} & message_to_user_id_set

    def is_service_bot(row: Dict[str, Any]) -> bool:
        return row["is_bot"] and (row["bot_type"] in UserProfile.SERVICE_BOT_TYPES)

    active_user_ids = get_ids_for(lambda r: True)
    online_push_user_ids = get_ids_for(
        lambda r: r["enable_online_push_notifications"],
    )

    # We deal with only the users who have disabled this setting, since that
    # will usually be much smaller a set than those who have enabled it (which
    # is the default)
    pm_mention_email_disabled_user_ids = get_ids_for(
        lambda r: not r["enable_offline_email_notifications"]
    )
    pm_mention_push_disabled_user_ids = get_ids_for(
        lambda r: not r["enable_offline_push_notifications"]
    )

    # Service bots don't get UserMessage rows.
    um_eligible_user_ids = get_ids_for(
        lambda r: not is_service_bot(r),
    )

    long_term_idle_user_ids = get_ids_for(
        lambda r: r["long_term_idle"],
    )

    # These three bot data structures need to filter from the full set
    # of users who either are receiving the message or might have been
    # mentioned in it, and so can't use get_ids_for.
    #
    # Further in the do_send_messages code path, once
    # `mentioned_user_ids` has been computed via Markdown, we'll filter
    # these data structures for just those users who are either a
    # direct recipient or were mentioned; for now, we're just making
    # sure we have the data we need for that without extra database
    # queries.
    default_bot_user_ids = {
        row["id"] for row in rows if row["is_bot"] and row["bot_type"] == UserProfile.DEFAULT_BOT
    }

    service_bot_tuples = [(row["id"], row["bot_type"]) for row in rows if is_service_bot(row)]

    # We also need the user IDs of all bots, to avoid trying to send push/email
    # notifications to them. This set will be directly sent to the event queue code
    # where we determine notifiability of the message for users.
    all_bot_user_ids = {row["id"] for row in rows if row["is_bot"]}

    info: RecipientInfoResult = dict(
        active_user_ids=active_user_ids,
        online_push_user_ids=online_push_user_ids,
        pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
        pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
        stream_push_user_ids=stream_push_user_ids,
        stream_email_user_ids=stream_email_user_ids,
        wildcard_mention_user_ids=wildcard_mention_user_ids,
        muted_sender_user_ids=muted_sender_user_ids,
        um_eligible_user_ids=um_eligible_user_ids,
        long_term_idle_user_ids=long_term_idle_user_ids,
        default_bot_user_ids=default_bot_user_ids,
        service_bot_tuples=service_bot_tuples,
        all_bot_user_ids=all_bot_user_ids,
    )
    return info
def get_service_bot_events(
    sender: UserProfile,
    service_bot_tuples: List[Tuple[int, int]],
    mentioned_user_ids: Set[int],
    active_user_ids: Set[int],
    recipient_type: int,
) -> Dict[str, List[Dict[str, Any]]]:
    """Build the queue events for service bots involved in a message.

    Returns a mapping of queue name ("outgoing_webhooks" or
    "embedded_bots") to {"trigger", "user_profile_id"} event dicts:
    a "mention" trigger for stream messages where the bot was actually
    mentioned, or a "private_message" trigger for PMs/huddles where the
    bot is a recipient.
    """
    event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)

    # Avoid infinite loops by preventing messages sent by bots from generating
    # Service events.
    if sender.is_bot:
        return event_dict

    def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
        if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
            queue_name = "outgoing_webhooks"
        elif bot_type == UserProfile.EMBEDDED_BOT:
            queue_name = "embedded_bots"
        else:
            logging.error(
                "Unexpected bot_type for Service bot id=%s: %s",
                user_profile_id,
                bot_type,
            )
            return

        is_stream = recipient_type == Recipient.STREAM

        # Important note: service_bot_tuples may contain service bots
        # who were not actually mentioned in the message (e.g. if
        # mention syntax for that bot appeared in a code block).
        # Thus, it is important to filter any users who aren't part of
        # either mentioned_user_ids (the actual mentioned users) or
        # active_user_ids (the actual recipients).
        #
        # So even though this is implied by the logic below, we filter
        # these not-actually-mentioned users here, to help keep this
        # function future-proof.
        if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
            return

        # Mention triggers, for stream messages
        if is_stream and user_profile_id in mentioned_user_ids:
            trigger = "mention"
        # PM triggers for personal and huddle messages
        elif (not is_stream) and (user_profile_id in active_user_ids):
            trigger = "private_message"
        else:
            return

        event_dict[queue_name].append(
            {
                "trigger": trigger,
                "user_profile_id": user_profile_id,
            }
        )

    for user_profile_id, bot_type in service_bot_tuples:
        maybe_add_event(
            user_profile_id=user_profile_id,
            bot_type=bot_type,
        )

    return event_dict
def do_schedule_messages(send_message_requests: Sequence[SendMessageRequest]) -> List[int]:
    """Persist one ScheduledMessage per request and return the new row ids.

    Each request must carry a deliver_at timestamp; a delivery_type of
    "send_later" or "remind" selects the corresponding ScheduledMessage
    delivery type. (Relies on bulk_create populating primary keys, as it
    does on PostgreSQL.)
    """
    scheduled_messages: List[ScheduledMessage] = []
    for send_request in send_message_requests:
        message = send_request.message
        scheduled = ScheduledMessage()
        scheduled.sender = message.sender
        scheduled.recipient = message.recipient
        scheduled.set_topic_name(topic_name=message.topic_name())
        scheduled.content = message.content
        scheduled.sending_client = message.sending_client
        scheduled.stream = send_request.stream
        scheduled.realm = send_request.realm
        assert send_request.deliver_at is not None
        scheduled.scheduled_timestamp = send_request.deliver_at
        if send_request.delivery_type == "send_later":
            scheduled.delivery_type = ScheduledMessage.SEND_LATER
        elif send_request.delivery_type == "remind":
            scheduled.delivery_type = ScheduledMessage.REMIND
        scheduled_messages.append(scheduled)

    ScheduledMessage.objects.bulk_create(scheduled_messages)
    return [scheduled.id for scheduled in scheduled_messages]
def build_message_send_dict(
    message: Message,
    stream: Optional[Stream] = None,
    local_id: Optional[str] = None,
    sender_queue_id: Optional[str] = None,
    realm: Optional[Realm] = None,
    widget_content_dict: Optional[Dict[str, Any]] = None,
    email_gateway: bool = False,
    mention_backend: Optional[MentionBackend] = None,
    limit_unread_user_ids: Optional[Set[int]] = None,
) -> SendMessageRequest:
    """Returns a dictionary that can be passed into do_send_messages. In
    production, this is always called by check_message, but some
    testing code paths call it directly.

    This computes the recipient/notification user-id sets, renders the
    message's Markdown (setting message.rendered_content as a side
    effect), expands user-group mentions into individual user ids, and
    bundles everything into a SendMessageRequest.
    """
    if realm is None:
        realm = message.sender.realm

    if mention_backend is None:
        mention_backend = MentionBackend(realm.id)

    mention_data = MentionData(
        mention_backend=mention_backend,
        content=message.content,
    )

    if message.is_stream_message():
        stream_id = message.recipient.type_id
        stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
            stream_id=stream_id,
            topic_name=message.topic_name(),
        )
    else:
        stream_topic = None

    info = get_recipient_info(
        realm_id=realm.id,
        recipient=message.recipient,
        sender_id=message.sender_id,
        stream_topic=stream_topic,
        possibly_mentioned_user_ids=mention_data.get_user_ids(),
        possible_wildcard_mention=mention_data.message_has_wildcards(),
    )

    # Render our message_dicts.
    assert message.rendered_content is None

    rendering_result = render_incoming_message(
        message,
        message.content,
        info["active_user_ids"],
        realm,
        mention_data=mention_data,
        email_gateway=email_gateway,
    )
    message.rendered_content = rendering_result.rendered_content
    message.rendered_content_version = markdown_version
    links_for_embed = rendering_result.links_for_preview

    mentioned_user_groups_map = get_user_group_mentions_data(
        mentioned_user_ids=rendering_result.mentions_user_ids,
        mentioned_user_group_ids=list(rendering_result.mentions_user_group_ids),
        mention_data=mention_data,
    )

    # For single user as well as user group mentions, we set the `mentioned`
    # flag on `UserMessage`
    for group_id in rendering_result.mentions_user_group_ids:
        members = mention_data.get_group_members(group_id)
        rendering_result.mentions_user_ids.update(members)

    # Only send data to Tornado about wildcard mentions if message
    # rendering determined the message had an actual wildcard
    # mention in it (and not e.g. wildcard mention syntax inside a
    # code block).
    if rendering_result.mentions_wildcard:
        wildcard_mention_user_ids = info["wildcard_mention_user_ids"]
    else:
        wildcard_mention_user_ids = set()

    """
    Once we have the actual list of mentioned ids from message
    rendering, we can patch in "default bots" (aka normal bots)
    who were directly mentioned in this message as eligible to
    get UserMessage rows.
    """
    mentioned_user_ids = rendering_result.mentions_user_ids
    default_bot_user_ids = info["default_bot_user_ids"]
    mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
    info["um_eligible_user_ids"] |= mentioned_bot_user_ids

    message_send_dict = SendMessageRequest(
        stream=stream,
        local_id=local_id,
        sender_queue_id=sender_queue_id,
        realm=realm,
        mention_data=mention_data,
        mentioned_user_groups_map=mentioned_user_groups_map,
        message=message,
        rendering_result=rendering_result,
        active_user_ids=info["active_user_ids"],
        online_push_user_ids=info["online_push_user_ids"],
        pm_mention_email_disabled_user_ids=info["pm_mention_email_disabled_user_ids"],
        pm_mention_push_disabled_user_ids=info["pm_mention_push_disabled_user_ids"],
        stream_push_user_ids=info["stream_push_user_ids"],
        stream_email_user_ids=info["stream_email_user_ids"],
        muted_sender_user_ids=info["muted_sender_user_ids"],
        um_eligible_user_ids=info["um_eligible_user_ids"],
        long_term_idle_user_ids=info["long_term_idle_user_ids"],
        default_bot_user_ids=info["default_bot_user_ids"],
        service_bot_tuples=info["service_bot_tuples"],
        all_bot_user_ids=info["all_bot_user_ids"],
        wildcard_mention_user_ids=wildcard_mention_user_ids,
        links_for_embed=links_for_embed,
        widget_content=widget_content_dict,
        limit_unread_user_ids=limit_unread_user_ids,
    )

    return message_send_dict
def do_send_messages(
    send_message_requests_maybe_none: Sequence[Optional[SendMessageRequest]],
    email_gateway: bool = False,
    mark_as_read: Sequence[int] = [],
) -> List[int]:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.

    Returns the IDs of the sent messages, in the order of the
    corresponding (non-None) input requests.

    mark_as_read: user IDs whose UserMessage rows should be created
    already marked as read.  (Only read here, never mutated, so the
    mutable default is safe.)

    NOTE(review): email_gateway is not referenced anywhere in this
    function body — presumably consumed by a caller or dead; confirm.
    """
    # Filter out messages which didn't pass internal_prep_message properly
    send_message_requests = [
        send_request
        for send_request in send_message_requests_maybe_none
        if send_request is not None
    ]
    # Save the message receipts in the database
    # Maps message id -> {user id -> [flag names]}; consumed below when
    # building the per-user event payloads.
    user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
    with transaction.atomic():
        Message.objects.bulk_create(send_request.message for send_request in send_message_requests)
        # Claim attachments in message
        for send_request in send_message_requests:
            if do_claim_attachments(
                send_request.message, send_request.rendering_result.potential_attachment_path_ids
            ):
                send_request.message.has_attachment = True
                send_request.message.save(update_fields=["has_attachment"])
        ums: List[UserMessageLite] = []
        for send_request in send_message_requests:
            # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
            # they will be processed later.
            mentioned_user_ids = send_request.rendering_result.mentions_user_ids
            # Extend the set with users who have muted the sender.
            # NOTE(review): this aliases send_request.muted_sender_user_ids,
            # so the update() below also mutates that set, which is read
            # again when building notification data further down —
            # confirm the sharing is intentional.
            mark_as_read_user_ids = send_request.muted_sender_user_ids
            mark_as_read_user_ids.update(mark_as_read)
            user_messages = create_user_messages(
                message=send_request.message,
                rendering_result=send_request.rendering_result,
                um_eligible_user_ids=send_request.um_eligible_user_ids,
                long_term_idle_user_ids=send_request.long_term_idle_user_ids,
                stream_push_user_ids=send_request.stream_push_user_ids,
                stream_email_user_ids=send_request.stream_email_user_ids,
                mentioned_user_ids=mentioned_user_ids,
                mark_as_read_user_ids=mark_as_read_user_ids,
                limit_unread_user_ids=send_request.limit_unread_user_ids,
            )
            for um in user_messages:
                user_message_flags[send_request.message.id][um.user_profile_id] = um.flags_list()
            ums.extend(user_messages)
            send_request.message.service_queue_events = get_service_bot_events(
                sender=send_request.message.sender,
                service_bot_tuples=send_request.service_bot_tuples,
                mentioned_user_ids=mentioned_user_ids,
                active_user_ids=send_request.active_user_ids,
                recipient_type=send_request.message.recipient.type,
            )
        # Insert all UserMessage rows with a single SQL statement.
        bulk_insert_ums(ums)
        for send_request in send_message_requests:
            do_widget_post_save_actions(send_request)
    # This next loop is responsible for notifying other parts of the
    # Zulip system about the messages we just committed to the database:
    # * Notifying clients via send_event
    # * Triggering outgoing webhooks via the service event queue.
    # * Updating the `first_message_id` field for streams without any message history.
    # * Implementing the Welcome Bot reply hack
    # * Adding links to the embed_links queue for open graph processing.
    for send_request in send_message_requests:
        realm_id: Optional[int] = None
        if send_request.message.is_stream_message():
            if send_request.stream is None:
                stream_id = send_request.message.recipient.type_id
                send_request.stream = Stream.objects.select_related().get(id=stream_id)
            # assert needed because stubs for django are missing
            assert send_request.stream is not None
            realm_id = send_request.stream.realm_id
        # Deliver events to the real-time push system, as well as
        # enqueuing any additional processing triggered by the message.
        wide_message_dict = MessageDict.wide_dict(send_request.message, realm_id)
        user_flags = user_message_flags.get(send_request.message.id, {})
        """
        TODO: We may want to limit user_ids to only those users who have
              UserMessage rows, if only for minor performance reasons.

              For now we queue events for all subscribers/sendees of the
              message, since downstream code may still do notifications
              that don't require UserMessage rows.

              Our automated tests have gotten better on this codepath,
              but we may have coverage gaps, so we should be careful
              about changing the next line.
        """
        user_ids = send_request.active_user_ids | set(user_flags.keys())
        sender_id = send_request.message.sender_id
        # We make sure the sender is listed first in the `users` list;
        # this results in the sender receiving the message first if
        # there are thousands of recipients, decreasing perceived latency.
        if sender_id in user_ids:
            user_list = [sender_id] + list(user_ids - {sender_id})
        else:
            user_list = list(user_ids)
        # Per-recipient payload for the message event.
        class UserData(TypedDict):
            id: int
            flags: List[str]
            mentioned_user_group_id: Optional[int]
        users: List[UserData] = []
        for user_id in user_list:
            flags = user_flags.get(user_id, [])
            user_data: UserData = dict(id=user_id, flags=flags, mentioned_user_group_id=None)
            if user_id in send_request.mentioned_user_groups_map:
                user_data["mentioned_user_group_id"] = send_request.mentioned_user_groups_map[
                    user_id
                ]
            users.append(user_data)
        sender = send_request.message.sender
        message_type = wide_message_dict["type"]
        active_users_data = [
            ActivePresenceIdleUserData(
                alerted="has_alert_word" in user_flags.get(user_id, []),
                notifications_data=UserMessageNotificationsData.from_user_id_sets(
                    user_id=user_id,
                    flags=user_flags.get(user_id, []),
                    private_message=(message_type == "private"),
                    online_push_user_ids=send_request.online_push_user_ids,
                    pm_mention_push_disabled_user_ids=send_request.pm_mention_push_disabled_user_ids,
                    pm_mention_email_disabled_user_ids=send_request.pm_mention_email_disabled_user_ids,
                    stream_push_user_ids=send_request.stream_push_user_ids,
                    stream_email_user_ids=send_request.stream_email_user_ids,
                    wildcard_mention_user_ids=send_request.wildcard_mention_user_ids,
                    muted_sender_user_ids=send_request.muted_sender_user_ids,
                    all_bot_user_ids=send_request.all_bot_user_ids,
                ),
            )
            for user_id in send_request.active_user_ids
        ]
        presence_idle_user_ids = get_active_presence_idle_user_ids(
            realm=sender.realm,
            sender_id=sender.id,
            active_users_data=active_users_data,
        )
        event = dict(
            type="message",
            message=send_request.message.id,
            message_dict=wide_message_dict,
            presence_idle_user_ids=presence_idle_user_ids,
            online_push_user_ids=list(send_request.online_push_user_ids),
            pm_mention_push_disabled_user_ids=list(send_request.pm_mention_push_disabled_user_ids),
            pm_mention_email_disabled_user_ids=list(
                send_request.pm_mention_email_disabled_user_ids
            ),
            stream_push_user_ids=list(send_request.stream_push_user_ids),
            stream_email_user_ids=list(send_request.stream_email_user_ids),
            wildcard_mention_user_ids=list(send_request.wildcard_mention_user_ids),
            muted_sender_user_ids=list(send_request.muted_sender_user_ids),
            all_bot_user_ids=list(send_request.all_bot_user_ids),
        )
        if send_request.message.is_stream_message():
            # Note: This is where authorization for single-stream
            # get_updates happens! We only attach stream data to the
            # notify new_message request if it's a public stream,
            # ensuring that in the tornado server, non-public stream
            # messages are only associated to their subscribed users.
            # assert needed because stubs for django are missing
            assert send_request.stream is not None
            if send_request.stream.is_public():
                event["realm_id"] = send_request.stream.realm_id
                event["stream_name"] = send_request.stream.name
            if send_request.stream.invite_only:
                event["invite_only"] = True
            if send_request.stream.first_message_id is None:
                send_request.stream.first_message_id = send_request.message.id
                send_request.stream.save(update_fields=["first_message_id"])
        if send_request.local_id is not None:
            event["local_id"] = send_request.local_id
        if send_request.sender_queue_id is not None:
            event["sender_queue_id"] = send_request.sender_queue_id
        send_event(send_request.realm, event, users)
        if send_request.links_for_embed:
            event_data = {
                "message_id": send_request.message.id,
                "message_content": send_request.message.content,
                "message_realm_id": send_request.realm.id,
                "urls": list(send_request.links_for_embed),
            }
            queue_json_publish("embed_links", event_data)
        if send_request.message.recipient.type == Recipient.PERSONAL:
            welcome_bot_id = get_system_bot(
                settings.WELCOME_BOT, send_request.message.sender.realm_id
            ).id
            if (
                welcome_bot_id in send_request.active_user_ids
                and welcome_bot_id != send_request.message.sender_id
            ):
                from zerver.lib.onboarding import send_welcome_bot_response
                send_welcome_bot_response(send_request)
        for queue_name, events in send_request.message.service_queue_events.items():
            for event in events:
                queue_json_publish(
                    queue_name,
                    {
                        "message": wide_message_dict,
                        "trigger": event["trigger"],
                        "user_profile_id": event["user_profile_id"],
                    },
                )
    return [send_request.message.id for send_request in send_message_requests]
class UserMessageLite:
    """
    A minimal, plain-Python stand-in for UserMessage used for bulk
    inserts.  The Django ORM is too slow for bulk operations, so this
    class carries just the three columns written to zerver_usermessage.
    """

    def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
        self.flags = flags
        self.message_id = message_id
        self.user_profile_id = user_profile_id

    def flags_list(self) -> List[str]:
        """Decode the packed flags bitfield into human-readable flag names."""
        return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(
    message: Message,
    rendering_result: MessageRenderingResult,
    um_eligible_user_ids: AbstractSet[int],
    long_term_idle_user_ids: AbstractSet[int],
    stream_push_user_ids: AbstractSet[int],
    stream_email_user_ids: AbstractSet[int],
    mentioned_user_ids: AbstractSet[int],
    mark_as_read_user_ids: Set[int],
    limit_unread_user_ids: Optional[Set[int]],
) -> List[UserMessageLite]:
    """Build the UserMessageLite rows (with per-user flags) for a message.

    Rows for long-term-idle (soft-deactivated) users may be omitted
    entirely; see the long comment below for the exact rules.
    """
    # These properties on the Message are set via
    # render_markdown by code in the Markdown inline patterns
    ids_with_alert_words = rendering_result.user_ids_with_alert_words
    sender_id = message.sender.id
    is_stream_message = message.is_stream_message()
    # Flags shared by every recipient of this message.
    base_flags = 0
    if rendering_result.mentions_wildcard:
        base_flags |= UserMessage.flags.wildcard_mentioned
    if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
        base_flags |= UserMessage.flags.is_private
    # For long_term_idle (aka soft-deactivated) users, we are allowed
    # to optimize by lazily not creating UserMessage rows that would
    # have the default 0 flag set (since the soft-reactivation logic
    # knows how to create those when the user comes back). We need to
    # create the UserMessage rows for these long_term_idle users
    # non-lazily in a few cases:
    #
    # * There are nonzero flags (e.g. the user was mentioned), since
    #   that case is rare and this saves a lot of complexity in
    #   soft-reactivation.
    #
    # * If the user is going to be notified (e.g. they get push/email
    #   notifications for every message on a stream), since in that
    #   case the notifications code will call `access_message` on the
    #   message to re-verify permissions, and for private streams,
    #   will get an error if the UserMessage row doesn't exist yet.
    #
    # See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
    # for details on this system.
    user_messages = []
    for user_profile_id in um_eligible_user_ids:
        flags = base_flags
        # The sender's own human-sent messages, explicitly requested
        # reads, and recipients outside limit_unread_user_ids all start
        # out marked as read.
        if (
            (user_profile_id == sender_id and message.sent_by_human())
            or user_profile_id in mark_as_read_user_ids
            or (limit_unread_user_ids is not None and user_profile_id not in limit_unread_user_ids)
        ):
            flags |= UserMessage.flags.read
        if user_profile_id in mentioned_user_ids:
            flags |= UserMessage.flags.mentioned
        if user_profile_id in ids_with_alert_words:
            flags |= UserMessage.flags.has_alert_word
        # Lazy soft-deactivation case described above: skip the row.
        if (
            user_profile_id in long_term_idle_user_ids
            and user_profile_id not in stream_push_user_ids
            and user_profile_id not in stream_email_user_ids
            and is_stream_message
            and int(flags) == 0
        ):
            continue
        um = UserMessageLite(
            user_profile_id=user_profile_id,
            message_id=message.id,
            flags=flags,
        )
        user_messages.append(um)
    return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
    """
    Doing bulk inserts this way is much faster than using Django,
    since we don't have any ORM overhead.  Profiling with 1000
    users shows a speedup of 0.436 -> 0.027 seconds, so we're
    talking about a 15x speedup.
    """
    if not ums:
        return

    vals = [(um.user_profile_id, um.message_id, um.flags) for um in ums]
    query = SQL(
        """
        INSERT into
            zerver_usermessage (user_profile_id, message_id, flags)
        VALUES %s
    """
    )

    # execute_values expands the VALUES %s placeholder so all rows are
    # inserted with a single SQL statement.
    with connection.cursor() as cursor:
        execute_values(cursor.cursor, query, vals)
def verify_submessage_sender(
    *,
    message_id: int,
    message_sender_id: int,
    submessage_sender_id: int,
) -> None:
    """Even though our submessage architecture is geared toward
    collaboration among all message readers, we still enforce that the
    first person to attach a submessage to a message must be the
    original sender of that message.

    Raises JsonableError when the rule is violated; returns None otherwise.
    """
    if submessage_sender_id == message_sender_id:
        return

    # Once the original sender has attached a submessage (started the
    # widget), other readers may contribute submessages too.
    sender_has_started_widget = SubMessage.objects.filter(
        message_id=message_id,
        sender_id=message_sender_id,
    ).exists()
    if not sender_has_started_widget:
        raise JsonableError(_("You cannot attach a submessage to this message."))
def do_add_submessage(
    realm: Realm,
    sender_id: int,
    message_id: int,
    msg_type: str,
    content: str,
) -> None:
    """Attach a new submessage (widget event) to an existing message.

    Should be called while holding a SELECT FOR UPDATE lock
    (e.g. via access_message(..., lock_message=True)) on the
    Message row, to prevent race conditions.
    """
    row = SubMessage(
        sender_id=sender_id,
        message_id=message_id,
        msg_type=msg_type,
        content=content,
    )
    row.save()

    notice = dict(
        type="submessage",
        msg_type=msg_type,
        message_id=message_id,
        submessage_id=row.id,
        sender_id=sender_id,
        content=content,
    )
    recipient_ids = [
        um.user_profile_id for um in UserMessage.objects.filter(message_id=message_id)
    ]
    # Defer the notification until the transaction commits, so clients
    # never hear about a submessage that ends up rolled back.
    transaction.on_commit(lambda: send_event(realm, notice, recipient_ids))
def notify_reaction_update(
    user_profile: UserProfile, message: Message, reaction: Reaction, op: str
) -> None:
    """Send the `reaction` event (op is "add" or "remove") for this
    reaction to everyone who should see it, and refresh the message cache.
    """
    user_dict = {
        "user_id": user_profile.id,
        "email": user_profile.email,
        "full_name": user_profile.full_name,
    }

    event: Dict[str, Any] = {
        "type": "reaction",
        "op": op,
        "user_id": user_profile.id,
        # TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing user_id.  See
        # https://github.com/zulip/zulip/pull/14711 for details.
        "user": user_dict,
        "message_id": message.id,
        "emoji_name": reaction.emoji_name,
        "emoji_code": reaction.emoji_code,
        "reaction_type": reaction.reaction_type,
    }

    # Update the cached message since new reaction is added.
    update_to_dict_cache([message])

    # Recipients for message update events, including reactions, are
    # everyone who got the original message, plus subscribers of
    # streams with the access to stream's full history.
    #
    # This means reactions won't live-update in preview narrows for a
    # stream the user isn't yet subscribed to; this is the right
    # performance tradeoff to avoid sending every reaction to public
    # stream messages to all users.
    #
    # To ensure that reactions do live-update for any user who has
    # actually participated in reacting to a message, we add a
    # "historical" UserMessage row for any user who reacts to message,
    # subscribing them to future notifications, even if they are not
    # subscribed to the stream.
    user_ids = set(
        UserMessage.objects.filter(message=message.id).values_list("user_profile_id", flat=True)
    )
    if message.recipient.type == Recipient.STREAM:
        stream_id = message.recipient.type_id
        stream = Stream.objects.get(id=stream_id)
        user_ids |= subscriber_ids_with_stream_history_access(stream)

    # Defer notification until the enclosing transaction commits.
    transaction.on_commit(lambda: send_event(user_profile.realm, event, list(user_ids)))
def do_add_reaction(
    user_profile: UserProfile,
    message: Message,
    emoji_name: str,
    emoji_code: str,
    reaction_type: str,
) -> None:
    """Persist a new reaction and notify clients about it.

    Should be called while holding a SELECT FOR UPDATE lock
    (e.g. via access_message(..., lock_message=True)) on the
    Message row, to prevent race conditions.
    """
    new_reaction = Reaction(
        user_profile=user_profile,
        message=message,
        emoji_name=emoji_name,
        emoji_code=emoji_code,
        reaction_type=reaction_type,
    )
    new_reaction.save()

    notify_reaction_update(user_profile, message, new_reaction, "add")
def check_add_reaction(
    user_profile: UserProfile,
    message_id: int,
    emoji_name: str,
    emoji_code: Optional[str],
    reaction_type: Optional[str],
) -> None:
    """Validate a reaction request and add the reaction.

    Raises JsonableError if the user may not access the message, the
    reaction already exists, or the emoji is invalid.  Creates a
    "historical" UserMessage row if the user has none for this message.
    """
    message, user_message = access_message(user_profile, message_id, lock_message=True)

    if emoji_code is None or reaction_type is None:
        # The emoji_code and reaction_type arguments are only required
        # for rare corner cases discussed in the long block comment
        # below.  For simple API clients, we allow specifying just the
        # name, and fill in the rest via the current name->code mapping.
        # Do the (single) lookup once, rather than once per missing field.
        default_emoji_code, default_reaction_type = emoji_name_to_emoji_code(
            message.sender.realm, emoji_name
        )
        if emoji_code is None:
            emoji_code = default_emoji_code
        if reaction_type is None:
            reaction_type = default_reaction_type

    if Reaction.objects.filter(
        user_profile=user_profile,
        message=message,
        emoji_code=emoji_code,
        reaction_type=reaction_type,
    ).exists():
        raise JsonableError(_("Reaction already exists."))

    query = Reaction.objects.filter(
        message=message, emoji_code=emoji_code, reaction_type=reaction_type
    )
    if query.exists():
        # If another user has already reacted to this message with
        # same emoji code, we treat the new reaction as a vote for the
        # existing reaction.  So the emoji name used by that earlier
        # reaction takes precedence over whatever was passed in this
        # request.  This is necessary to avoid a message having 2
        # "different" emoji reactions with the same emoji code (and
        # thus same image) on the same message, which looks ugly.
        #
        # In this "voting for an existing reaction" case, we shouldn't
        # check whether the emoji code and emoji name match, since
        # it's possible that the (emoji_type, emoji_name, emoji_code)
        # triple for this existing reaction may not pass validation
        # now (e.g. because it is for a realm emoji that has been
        # since deactivated).  We still want to allow users to add a
        # vote any old reaction they see in the UI even if that is a
        # deactivated custom emoji, so we just use the emoji name from
        # the existing reaction with no further validation.
        reaction = query.first()
        assert reaction is not None
        emoji_name = reaction.emoji_name
    else:
        # Otherwise, use the name provided in this request, but verify
        # it is valid in the user's realm (e.g. not a deactivated
        # realm emoji).
        check_emoji_request(user_profile.realm, emoji_name, emoji_code, reaction_type)

    if user_message is None:
        # Users can see and react to messages sent to streams they
        # were not a subscriber to; in order to receive events for
        # those, we give the user a `historical` UserMessage objects
        # for the message.  This is the same trick we use for starring
        # messages.
        UserMessage.objects.create(
            user_profile=user_profile,
            message=message,
            flags=UserMessage.flags.historical | UserMessage.flags.read,
        )

    do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)
def do_remove_reaction(
    user_profile: UserProfile, message: Message, emoji_code: str, reaction_type: str
) -> None:
    """Delete the user's matching reaction and notify clients.

    Should be called while holding a SELECT FOR UPDATE lock
    (e.g. via access_message(..., lock_message=True)) on the
    Message row, to prevent race conditions.
    """
    target_reaction = Reaction.objects.get(
        user_profile=user_profile,
        message=message,
        emoji_code=emoji_code,
        reaction_type=reaction_type,
    )
    target_reaction.delete()

    notify_reaction_update(user_profile, message, target_reaction, "remove")
def do_send_typing_notification(
    realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str
) -> None:
    """Broadcast a private-message typing-status event ("start"/"stop"
    via operator) to the active recipients of the conversation.
    """
    # Include the full recipient list in the event body so clients can
    # tell which (possibly huddle) conversation the typing belongs to.
    event = dict(
        type="typing",
        message_type="private",
        op=operator,
        sender={"user_id": sender.id, "email": sender.email},
        recipients=[
            {"user_id": profile.id, "email": profile.email}
            for profile in recipient_user_profiles
        ],
    )

    # Only deliver the notification to active user recipients.
    active_recipient_ids = [
        profile.id for profile in recipient_user_profiles if profile.is_active
    ]
    send_event(realm, event, active_recipient_ids)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile, user_ids: List[int], operator: str) -> None:
    """Validate the recipient list for a typing notification and send it."""
    realm = sender.realm

    # The sender is always part of the conversation they are typing in.
    if sender.id not in user_ids:
        user_ids.append(sender.id)

    # If any of the user_ids being sent in are invalid, we reject the
    # whole request, since a partial list of user_ids can create
    # confusion related to huddles.  Plus it's a good sign that a
    # client is confused (or possibly even malicious) if we get bad
    # user_ids.
    recipient_profiles = []
    for user_id in user_ids:
        try:
            # We include cross-realm bots as possible recipients, so
            # that clients can know which huddle conversation is
            # relevant here.
            profile = get_user_by_id_in_realm_including_cross_realm(user_id, sender.realm)
        except UserProfile.DoesNotExist:
            raise JsonableError(_("Invalid user ID {}").format(user_id))
        recipient_profiles.append(profile)

    do_send_typing_notification(
        realm=realm,
        sender=sender,
        recipient_user_profiles=recipient_profiles,
        operator=operator,
    )
def do_send_stream_typing_notification(
    sender: UserProfile, operator: str, stream: Stream, topic: str
) -> None:
    """Broadcast a stream typing-status event ("start"/"stop" via
    operator) to everyone subscribed to the stream.
    """
    event = dict(
        type="typing",
        message_type="stream",
        op=operator,
        sender={"user_id": sender.id, "email": sender.email},
        stream_id=stream.id,
        topic=topic,
    )

    subscriber_ids = get_user_ids_for_streams({stream.id})[stream.id]
    send_event(sender.realm, event, subscriber_ids)
def ensure_stream(
    realm: Realm,
    stream_name: str,
    invite_only: bool = False,
    stream_description: str = "",
    *,
    acting_user: Optional[UserProfile],
) -> Stream:
    """Fetch the named stream, creating it first if it does not exist."""
    stream, created = create_stream_if_needed(
        realm,
        stream_name,
        invite_only=invite_only,
        stream_description=stream_description,
        acting_user=acting_user,
    )
    return stream
def get_recipient_from_user_profiles(
    recipient_profiles: Sequence[UserProfile],
    forwarded_mirror_message: bool,
    forwarder_user_profile: Optional[UserProfile],
    sender: UserProfile,
) -> Recipient:
    """Resolve a validated recipient list into a Recipient row: a
    PERSONAL recipient for 1:1 messages, or a huddle recipient otherwise.
    """
    # Avoid mutating the passed in list of recipient_profiles.
    recipient_profiles_map = {user_profile.id: user_profile for user_profile in recipient_profiles}

    if forwarded_mirror_message:
        # In our mirroring integrations with some third-party
        # protocols, bots subscribed to the third-party protocol
        # forward to Zulip messages that they received in the
        # third-party service.  The permissions model for that
        # forwarding is that users can only submit to Zulip private
        # messages they personally received, and here we do the check
        # for whether forwarder_user_profile is among the private
        # message recipients of the message.
        assert forwarder_user_profile is not None
        if forwarder_user_profile.id not in recipient_profiles_map:
            raise ValidationError(_("User not authorized for this query"))

    # If the private message is just between the sender and
    # another person, force it to be a personal internally
    if len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map:
        del recipient_profiles_map[sender.id]

    assert recipient_profiles_map
    if len(recipient_profiles_map) == 1:
        [user_profile] = recipient_profiles_map.values()
        return Recipient(
            id=user_profile.recipient_id,
            type=Recipient.PERSONAL,
            type_id=user_profile.id,
        )

    # Otherwise, we need a huddle.  Make sure the sender is included in huddle messages
    recipient_profiles_map[sender.id] = sender

    user_ids = set(recipient_profiles_map)
    return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(
    user_profiles: Sequence[UserProfile], sender: UserProfile, allow_deactivated: bool = False
) -> Sequence[UserProfile]:
    """Check that every listed recipient can legally receive a private
    message from sender.

    Deduplicates the recipients (preserving order) and verifies that
    nobody involved has been deactivated (unless allow_deactivated) and
    that all non-cross-realm participants share a single realm.

    Raises ValidationError on any violation.
    """
    deduped_profiles: Dict[int, UserProfile] = {}

    # We exempt cross-realm bots from the check that all the recipients
    # are in the same realm.
    realm_ids = set()
    if not is_cross_realm_bot_email(sender.email):
        realm_ids.add(sender.realm_id)

    for profile in user_profiles:
        unusable_account = (
            not profile.is_active and not profile.is_mirror_dummy and not allow_deactivated
        )
        if unusable_account or profile.realm.deactivated:
            raise ValidationError(
                _("'{email}' is no longer using Zulip.").format(email=profile.email)
            )
        deduped_profiles[profile.id] = profile
        if not is_cross_realm_bot_email(profile.email):
            realm_ids.add(profile.realm_id)

    if len(realm_ids) > 1:
        raise ValidationError(_("You can't send private messages outside of your organization."))

    return list(deduped_profiles.values())
def recipient_for_user_profiles(
    user_profiles: Sequence[UserProfile],
    forwarded_mirror_message: bool,
    forwarder_user_profile: Optional[UserProfile],
    sender: UserProfile,
    allow_deactivated: bool = False,
) -> Recipient:
    """Validate a private-message recipient list and resolve it to a
    Recipient row (personal or huddle).
    """
    checked_profiles = validate_recipient_user_profiles(
        user_profiles, sender, allow_deactivated=allow_deactivated
    )
    return get_recipient_from_user_profiles(
        checked_profiles, forwarded_mirror_message, forwarder_user_profile, sender
    )
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
    """Return the ID of an identical, already-mirrored message, if any.

    Mirroring integrations can deliver the same third-party message via
    multiple forwarding bots; this detects such duplicates so we store
    the message only once.  Returns None when no duplicate exists.
    """
    if message.recipient.type == Recipient.HUDDLE:
        # For huddle messages, we use a 10-second window because the
        # timestamps aren't guaranteed to actually match between two
        # copies of the same message.
        time_window = datetime.timedelta(seconds=10)
    else:
        time_window = datetime.timedelta(seconds=0)

    query = Message.objects.filter(
        sender=message.sender,
        recipient=message.recipient,
        content=message.content,
        sending_client=message.sending_client,
        date_sent__gte=message.date_sent - time_window,
        date_sent__lte=message.date_sent + time_window,
    )
    messages = filter_by_exact_message_topic(
        query=query,
        message=message,
    )

    # Use .first() rather than .exists() followed by indexing; the
    # original two-step form issued two database queries where one
    # suffices, and .first() additionally gives a deterministic
    # (primary-key ordered) pick when several duplicates exist.
    duplicate = messages.first()
    if duplicate is not None:
        return duplicate.id
    return None
def extract_stream_indicator(s: str) -> Union[str, int]:
    """Parse a client-supplied stream indicator into a name or an ID.

    Clients may pass a raw stream name, a JSON-encoded name, a JSON
    integer stream ID, or (for legacy reasons) a one-element JSON list
    wrapping either of the latter two.
    """
    try:
        parsed = orjson.loads(s)
    except orjson.JSONDecodeError:
        # Not JSON at all: the input is a raw stream name.
        return s

    # Legacy one-element-list encoding; we should stop supporting this
    # odd use case once we improve our documentation.
    if isinstance(parsed, list):
        if len(parsed) != 1:  # nocoverage
            raise JsonableError(_("Expected exactly one stream"))
        parsed = parsed[0]

    if isinstance(parsed, str):
        # We had a JSON-encoded stream name.
        return parsed
    if isinstance(parsed, int):
        # We had a stream id.
        return parsed

    raise JsonableError(_("Invalid data type for stream"))
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
    """Parse a private-message recipient list from its wire format.

    We try to accept multiple incoming formats for recipients: a JSON
    list of emails or of user IDs, a JSON string, or a plain
    comma-separated string of emails.  See test_extract_recipients()
    for examples of what we allow.
    """
    try:
        parsed = orjson.loads(s)
    except orjson.JSONDecodeError:
        parsed = s

    if isinstance(parsed, str):
        parsed = parsed.split(",")

    if not isinstance(parsed, list):
        raise JsonableError(_("Invalid data type for recipients"))

    if not parsed:
        # We don't complain about empty message recipients here.
        return parsed

    if isinstance(parsed[0], str):
        return get_validated_emails(parsed)

    if not isinstance(parsed[0], int):
        raise JsonableError(_("Invalid data type for recipients"))
    return get_validated_user_ids(parsed)
def get_validated_user_ids(user_ids: Collection[int]) -> List[int]:
    """Verify every recipient entry is an integer user ID; return the
    deduplicated list."""
    if any(not isinstance(user_id, int) for user_id in user_ids):
        raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
    return list(set(user_ids))
def get_validated_emails(emails: Collection[str]) -> List[str]:
    """Verify every recipient entry is a string email; return the list
    deduplicated, whitespace-stripped, and with empty entries dropped."""
    if any(not isinstance(email, str) for email in emails):
        raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
    unique_emails = {email.strip() for email in emails}
    return [email for email in unique_emails if email]
def check_send_stream_message(
    sender: UserProfile,
    client: Client,
    stream_name: str,
    topic: str,
    body: str,
    realm: Optional[Realm] = None,
) -> int:
    """Validate and send a stream message addressed by stream name,
    returning the new message's ID."""
    addressee = Addressee.for_stream_name(stream_name, topic)
    send_request = check_message(sender, client, addressee, body, realm)

    sent_message_ids = do_send_messages([send_request])
    return sent_message_ids[0]
def check_send_stream_message_by_id(
    sender: UserProfile,
    client: Client,
    stream_id: int,
    topic: str,
    body: str,
    realm: Optional[Realm] = None,
) -> int:
    """Validate and send a stream message addressed by stream ID,
    returning the new message's ID."""
    addressee = Addressee.for_stream_id(stream_id, topic)
    send_request = check_message(sender, client, addressee, body, realm)

    sent_message_ids = do_send_messages([send_request])
    return sent_message_ids[0]
def check_send_private_message(
    sender: UserProfile, client: Client, receiving_user: UserProfile, body: str
) -> int:
    """Validate and send a 1:1 private message, returning the new
    message's ID."""
    addressee = Addressee.for_user_profile(receiving_user)
    send_request = check_message(sender, client, addressee, body)

    sent_message_ids = do_send_messages([send_request])
    return sent_message_ids[0]
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(
    sender: UserProfile,
    client: Client,
    message_type_name: str,
    message_to: Union[Sequence[int], Sequence[str]],
    topic_name: Optional[str],
    message_content: str,
    realm: Optional[Realm] = None,
    forged: bool = False,
    forged_timestamp: Optional[float] = None,
    forwarder_user_profile: Optional[UserProfile] = None,
    local_id: Optional[str] = None,
    sender_queue_id: Optional[str] = None,
    widget_content: Optional[str] = None,
    *,
    skip_stream_access_check: bool = False,
) -> int:
    """Validate and send a message, returning the id of the sent message.

    Has the same argspec as check_message, which performs the actual
    validation; see that function for parameter details.
    """
    addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
    try:
        message = check_message(
            sender,
            client,
            addressee,
            message_content,
            realm,
            forged,
            forged_timestamp,
            forwarder_user_profile,
            local_id,
            sender_queue_id,
            widget_content,
            skip_stream_access_check=skip_stream_access_check,
        )
    except ZephyrMessageAlreadySentException as e:
        # For Zephyr mirroring, a duplicate of an already-mirrored
        # message counts as a success; return the original's ID.
        return e.message_id
    return do_send_messages([message])[0]
def check_schedule_message(
    sender: UserProfile,
    client: Client,
    message_type_name: str,
    message_to: Union[Sequence[str], Sequence[int]],
    topic_name: Optional[str],
    message_content: str,
    delivery_type: str,
    deliver_at: datetime.datetime,
    realm: Optional[Realm] = None,
    forwarder_user_profile: Optional[UserProfile] = None,
) -> int:
    """Validate a message and schedule it for delivery at deliver_at.

    Returns the ID produced by do_schedule_messages (presumably the
    scheduled-message row's ID — confirm against that function).
    """
    addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
    send_request = check_message(
        sender,
        client,
        addressee,
        message_content,
        realm=realm,
        forwarder_user_profile=forwarder_user_profile,
    )
    send_request.deliver_at = deliver_at
    send_request.delivery_type = delivery_type

    recipient = send_request.message.recipient
    # Reminders are restricted to stream messages, except that the
    # type_id != sender.id clause exempts what appears to be the
    # PM-to-self case — confirm that reading against Recipient semantics.
    if delivery_type == "remind" and (
        recipient.type != Recipient.STREAM and recipient.type_id != sender.id
    ):
        raise JsonableError(_("Reminders can only be set for streams."))

    return do_schedule_messages([send_request])[0]
def validate_message_edit_payload(
    message: Message,
    stream_id: Optional[int],
    topic_name: Optional[str],
    propagate_mode: Optional[str],
    content: Optional[str],
) -> None:
    """
    Checks that the data sent is well-formed. Does not handle editability, permissions etc.

    Raises JsonableError for any malformed combination of fields.
    """
    if topic_name is None and content is None and stream_id is None:
        raise JsonableError(_("Nothing to change"))

    if not message.is_stream_message():
        # Private messages have neither streams nor topics to edit.
        if stream_id is not None:
            raise JsonableError(_("Private messages cannot be moved to streams."))
        if topic_name is not None:
            raise JsonableError(_("Private messages cannot have topics."))

    # Propagating the edit to other messages only makes sense for
    # topic/stream moves, not pure content edits.
    if propagate_mode != "change_one" and topic_name is None and stream_id is None:
        raise JsonableError(_("Invalid propagate_mode without topic edit"))

    if topic_name is not None:
        # Validate the new topic name.
        check_stream_topic(topic_name)

    if stream_id is not None and content is not None:
        raise JsonableError(_("Cannot change message content while changing stream"))

    # Right now, we prevent users from editing widgets.
    if content is not None and is_widget_message(message):
        raise JsonableError(_("Widgets cannot be edited."))
def can_edit_content_or_topic(
    message: Message,
    user_profile: UserProfile,
    is_no_topic_msg: bool,
    content: Optional[str] = None,
    topic_name: Optional[str] = None,
) -> bool:
    """Return whether user_profile may apply this content/topic edit
    to the message."""
    # Senders may edit both the content and topic of their own messages.
    if message.sender_id == user_profile.id:
        return True

    # Nobody else may ever edit another user's message content.
    if content is not None:
        return False

    assert topic_name is not None

    # From here on we are deciding topic-edit permission only.
    #
    # We allow anyone to retitle "(no topic)" messages, to help tend them.
    if is_no_topic_msg:
        return True

    # Otherwise, defer to the realm's edit_topic_policy setting and the
    # user's role (can_edit_topic_of_any_message encapsulates both).
    return user_profile.can_edit_topic_of_any_message()
def check_update_message(
    user_profile: UserProfile,
    message_id: int,
    stream_id: Optional[int] = None,
    topic_name: Optional[str] = None,
    propagate_mode: str = "change_one",
    send_notification_to_old_thread: bool = True,
    send_notification_to_new_thread: bool = True,
    content: Optional[str] = None,
) -> int:
    """This will update a message given the message id and user profile.
    It checks whether the user profile has the permission to edit the message
    and raises a JsonableError if otherwise.
    It returns the number changed.
    """
    # Raises if the user cannot access the message at all.
    message, ignored_user_message = access_message(user_profile, message_id)
    if not user_profile.realm.allow_message_editing:
        raise JsonableError(_("Your organization has turned off message editing"))
    # The zerver/views/message_edit.py call point already strips this
    # via REQ_topic; so we can delete this line if we arrange a
    # contract where future callers in the embedded bots system strip
    # use REQ_topic as well (or otherwise are guaranteed to strip input).
    if topic_name is not None:
        topic_name = topic_name.strip()
        # Treat a topic "change" to the current topic as no topic edit.
        if topic_name == message.topic_name():
            topic_name = None
    validate_message_edit_payload(message, stream_id, topic_name, propagate_mode, content)
    is_no_topic_msg = message.topic_name() == "(no topic)"
    if content is not None or topic_name is not None:
        if not can_edit_content_or_topic(
            message, user_profile, is_no_topic_msg, content, topic_name
        ):
            raise JsonableError(_("You don't have permission to edit this message"))
    # If there is a change to the content, check that it hasn't been too long
    # Allow an extra 20 seconds since we potentially allow editing 15 seconds
    # past the limit, and in case there are network issues, etc. The 15 comes
    # from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
    # you change this value also change those two parameters in message_edit.js.
    edit_limit_buffer = 20
    if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
        deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
        if (timezone_now() - message.date_sent) > datetime.timedelta(seconds=deadline_seconds):
            raise JsonableError(_("The time limit for editing this message has passed"))
    # If there is a change to the topic, check that the user is allowed to
    # edit it and that it has not been too long. If this is not the user who
    # sent the message, they are not the admin, and the time limit for editing
    # topics is passed, raise an error.
    if (
        topic_name is not None
        and message.sender != user_profile
        and not user_profile.is_realm_admin
        and not user_profile.is_moderator
        and not is_no_topic_msg
    ):
        deadline_seconds = Realm.DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS + edit_limit_buffer
        if (timezone_now() - message.date_sent) > datetime.timedelta(seconds=deadline_seconds):
            raise JsonableError(_("The time limit for editing this message's topic has passed"))
    rendering_result = None
    links_for_embed: Set[str] = set()
    prior_mention_user_ids: Set[int] = set()
    mention_data: Optional[MentionData] = None
    if content is not None:
        # An edit that blanks the content is treated as a soft-delete.
        if content.rstrip() == "":
            content = "(deleted)"
        content = normalize_body(content)
        mention_backend = MentionBackend(user_profile.realm_id)
        mention_data = MentionData(
            mention_backend=mention_backend,
            content=content,
        )
        user_info = get_user_info_for_message_updates(message.id)
        prior_mention_user_ids = user_info["mention_user_ids"]
        # We render the message using the current user's realm; since
        # the cross-realm bots never edit messages, this should be
        # always correct.
        # Note: If rendering fails, the called code will raise a JsonableError.
        rendering_result = render_incoming_message(
            message,
            content,
            user_info["message_user_ids"],
            user_profile.realm,
            mention_data=mention_data,
        )
        links_for_embed |= rendering_result.links_for_preview
        # Wildcard mentions added by the edit are subject to the same
        # policy as when sending a new message.
        if message.is_stream_message() and rendering_result.mentions_wildcard:
            stream = access_stream_by_id(user_profile, message.recipient.type_id)[0]
            if not wildcard_mention_allowed(message.sender, stream):
                raise JsonableError(
                    _("You do not have permission to use wildcard mentions in this stream.")
                )
    new_stream = None
    number_changed = 0
    if stream_id is not None:
        assert message.is_stream_message()
        if not user_profile.can_move_messages_between_streams():
            raise JsonableError(_("You don't have permission to move this message"))
        try:
            access_stream_by_id(user_profile, message.recipient.type_id)
        except JsonableError:
            raise JsonableError(
                _(
                    "You don't have permission to move this message due to missing access to its stream"
                )
            )
        new_stream = access_stream_by_id(user_profile, stream_id, require_active=True)[0]
        check_stream_access_based_on_stream_post_policy(user_profile, new_stream)
    number_changed = do_update_message(
        user_profile,
        message,
        new_stream,
        topic_name,
        propagate_mode,
        send_notification_to_old_thread,
        send_notification_to_new_thread,
        content,
        rendering_result,
        prior_mention_user_ids,
        mention_data,
    )
    if links_for_embed:
        event_data = {
            "message_id": message.id,
            "message_content": message.content,
            # The choice of `user_profile.realm_id` rather than
            # `sender.realm_id` must match the decision made in the
            # `render_incoming_message` call earlier in this function.
            "message_realm_id": user_profile.realm_id,
            "urls": list(links_for_embed),
        }
        queue_json_publish("embed_links", event_data)
    return number_changed
def check_default_stream_group_name(group_name: str) -> None:
    """Validate a default stream group name.

    Raises JsonableError if the name is empty/whitespace-only, exceeds
    DefaultStreamGroup.MAX_NAME_LENGTH, or contains NULL (0x00) bytes.
    Returns None on success.
    """
    if group_name.strip() == "":
        raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
    if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
        raise JsonableError(
            _("Default stream group name too long (limit: {} characters)").format(
                DefaultStreamGroup.MAX_NAME_LENGTH,
            )
        )
    # A single C-speed membership test replaces the original
    # per-character ord() loop; behavior is identical.
    if "\x00" in group_name:
        raise JsonableError(
            _("Default stream group name '{}' contains NULL (0x00) characters.").format(
                group_name,
            )
        )
def send_rate_limited_pm_notification_to_bot_owner(
    sender: UserProfile, realm: Realm, content: str
) -> None:
    """
    Sends a PM error notification to a bot's owner if one hasn't already
    been sent in the last 5 minutes.

    No-ops (rather than raising) in every case where sending the
    notification is inappropriate: deactivated/zephyr realms, non-bot
    senders, cross-realm messages, and inside the rate-limit window.
    """
    if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
        return
    if not sender.is_bot or sender.bot_owner is None:
        return
    # Don't send these notifications for cross-realm bot messages
    # (e.g. from EMAIL_GATEWAY_BOT) since the owner for
    # EMAIL_GATEWAY_BOT is probably the server administrator, not
    # the owner of the bot who could potentially fix the problem.
    if sender.realm != realm:
        return
    # We warn the user once every 5 minutes to avoid a flood of
    # PMs on a misconfigured integration, re-using the
    # UserProfile.last_reminder field, which is not used for bots.
    last_reminder = sender.last_reminder
    waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
    if last_reminder and timezone_now() - last_reminder <= waitperiod:
        return
    internal_send_private_message(
        get_system_bot(settings.NOTIFICATION_BOT, sender.bot_owner.realm_id),
        sender.bot_owner,
        content,
    )
    # Record when we warned, starting the next rate-limit window.
    sender.last_reminder = timezone_now()
    sender.save(update_fields=["last_reminder"])
def send_pm_if_empty_stream(
    stream: Optional[Stream],
    realm: Realm,
    sender: UserProfile,
    stream_name: Optional[str] = None,
    stream_id: Optional[int] = None,
) -> None:
    """If a bot sends a message to a stream that doesn't exist or has no
    subscribers, sends a notification to the bot owner (if not a
    cross-realm bot) so that the owner can correct the issue.

    Pass `stream=None` with either `stream_name` or `stream_id` for a
    stream that could not be found; pass a real `stream` to check for
    the zero-subscribers case.
    """
    if not sender.is_bot or sender.bot_owner is None:
        return
    arg_dict = {
        "bot_identity": f"`{sender.delivery_email}`",
        "stream_id": stream_id,
        "stream_name": f"#**{stream_name}**",
        "new_stream_link": "#streams/new",
    }
    # The early return above guarantees sender.bot_owner is not None, so
    # the original redundant `if sender.bot_owner is not None:` guard has
    # been removed and the body de-indented one level.
    with override_language(sender.bot_owner.default_language):
        if stream is None:
            if stream_id is not None:
                content = _(
                    "Your bot {bot_identity} tried to send a message to stream ID "
                    "{stream_id}, but there is no stream with that ID."
                ).format(**arg_dict)
            else:
                assert stream_name is not None
                content = _(
                    "Your bot {bot_identity} tried to send a message to stream "
                    "{stream_name}, but that stream does not exist. "
                    "Click [here]({new_stream_link}) to create it."
                ).format(**arg_dict)
        else:
            # The stream exists; only notify if it has no subscribers.
            if num_subscribers_for_stream_id(stream.id) > 0:
                return
            content = _(
                "Your bot {bot_identity} tried to send a message to "
                "stream {stream_name}. The stream exists but "
                "does not have any subscribers."
            ).format(**arg_dict)
    send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(
    stream_name: str, realm: Realm, sender: UserProfile
) -> Stream:
    """Resolve stream_name in realm, warning a bot's owner as appropriate.

    Raises StreamDoesNotExistError (after notifying the bot owner) when
    the stream is missing; otherwise notifies the owner if the stream has
    no subscribers and returns the Stream.
    """
    stream_name = stream_name.strip()
    check_stream_name(stream_name)
    try:
        target = get_stream(stream_name, realm)
    except Stream.DoesNotExist:
        send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
        raise StreamDoesNotExistError(escape(stream_name))
    send_pm_if_empty_stream(target, realm, sender)
    return target
def validate_stream_id_with_pm_notification(
    stream_id: int, realm: Realm, sender: UserProfile
) -> Stream:
    """Resolve stream_id in realm, warning a bot's owner as appropriate.

    Raises StreamWithIDDoesNotExistError (after notifying the bot owner)
    when the stream is missing; otherwise notifies the owner if the
    stream has no subscribers and returns the Stream.
    """
    try:
        target = get_stream_by_id_in_realm(stream_id, realm)
    except Stream.DoesNotExist:
        send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
        raise StreamWithIDDoesNotExistError(stream_id)
    send_pm_if_empty_stream(target, realm, sender)
    return target
def check_private_message_policy(
    realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]
) -> None:
    """Raise JsonableError if the realm's policy forbids this private message."""
    if realm.private_message_policy != Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
        return
    # Even with PMs disabled, we allow PMs between users and bots, to
    # avoid breaking the tutorial as well as automated notifications
    # from system bots to users.
    bot_involved = sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot)
    if bot_involved:
        return
    raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns message ready for sending with do_send_message on success or the error message (string) on error.
def check_message(
    sender: UserProfile,
    client: Client,
    addressee: Addressee,
    message_content_raw: str,
    realm: Optional[Realm] = None,
    forged: bool = False,
    forged_timestamp: Optional[float] = None,
    forwarder_user_profile: Optional[UserProfile] = None,
    local_id: Optional[str] = None,
    sender_queue_id: Optional[str] = None,
    widget_content: Optional[str] = None,
    email_gateway: bool = False,
    *,
    skip_stream_access_check: bool = False,
    mention_backend: Optional[MentionBackend] = None,
    limit_unread_user_ids: Optional[Set[int]] = None,
) -> SendMessageRequest:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.

    Validates the addressee, content, and sender permissions, builds an
    (unsaved) Message, and returns the SendMessageRequest consumed by
    do_send_messages.  Raises JsonableError on validation failure and
    ZephyrMessageAlreadySentException for duplicate zephyr mirror sends.
    """
    stream = None
    message_content = normalize_body(message_content_raw)
    if realm is None:
        realm = sender.realm
    if addressee.is_stream():
        topic_name = addressee.topic()
        topic_name = truncate_topic(topic_name)
        stream_name = addressee.stream_name()
        stream_id = addressee.stream_id()
        # Resolve the stream by name or by ID (notifying bot owners about
        # missing/empty streams), or use the pre-fetched Stream object.
        if stream_name is not None:
            stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
        elif stream_id is not None:
            stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
        else:
            stream = addressee.stream()
        assert stream is not None
        # To save a database round trip, we construct the Recipient
        # object for the Stream rather than fetching it from the
        # database using the stream.recipient foreign key.
        #
        # This is simpler than ensuring that code paths that fetch a
        # Stream that will be used for sending a message have a
        # `select_related("recipient"), which would also needlessly
        # expand Stream objects in memory (all the fields of Recipient
        # are already known given the Stream object).
        recipient = Recipient(
            id=stream.recipient_id,
            type_id=stream.id,
            type=Recipient.STREAM,
        )
        if not skip_stream_access_check:
            access_stream_for_send_message(
                sender=sender, stream=stream, forwarder_user_profile=forwarder_user_profile
            )
        else:
            # Defensive assertion - the only currently supported use case
            # for this option is for outgoing webhook bots and since this
            # is security-sensitive code, it's beneficial to ensure nothing
            # else can sneak past the access check.
            assert sender.bot_type == sender.OUTGOING_WEBHOOK_BOT
        if realm.mandatory_topics and topic_name == "(no topic)":
            raise JsonableError(_("Topics are required in this organization"))
    elif addressee.is_private():
        user_profiles = addressee.user_profiles()
        mirror_message = client and client.name in [
            "zephyr_mirror",
            "irc_mirror",
            "jabber_mirror",
            "JabberMirror",
        ]
        check_private_message_policy(realm, sender, user_profiles)
        # API super-users who set the `forged` flag are allowed to
        # forge messages sent by any user, so we disable the
        # `forwarded_mirror_message` security check in that case.
        forwarded_mirror_message = mirror_message and not forged
        try:
            recipient = recipient_for_user_profiles(
                user_profiles, forwarded_mirror_message, forwarder_user_profile, sender
            )
        except ValidationError as e:
            assert isinstance(e.messages[0], str)
            raise JsonableError(e.messages[0])
    else:
        # This is defensive code--Addressee already validates
        # the message type.
        raise AssertionError("Invalid message type")
    # Build the (unsaved) Message row.
    message = Message()
    message.sender = sender
    message.content = message_content
    message.recipient = recipient
    if addressee.is_stream():
        message.set_topic_name(topic_name)
    if forged and forged_timestamp is not None:
        # Forged messages come with a timestamp
        message.date_sent = timestamp_to_datetime(forged_timestamp)
    else:
        message.date_sent = timezone_now()
    message.sending_client = client
    # We render messages later in the process.
    assert message.rendered_content is None
    if client.name == "zephyr_mirror":
        # De-duplicate messages the zephyr mirror may resend.
        id = already_sent_mirrored_message_id(message)
        if id is not None:
            raise ZephyrMessageAlreadySentException(id)
    # Parse and validate any widget (e.g. poll/todo) payload.
    widget_content_dict = None
    if widget_content is not None:
        try:
            widget_content_dict = orjson.loads(widget_content)
        except orjson.JSONDecodeError:
            raise JsonableError(_("Widgets: API programmer sent invalid JSON content"))
        try:
            check_widget_content(widget_content_dict)
        except ValidationError as error:
            raise JsonableError(
                _("Widgets: {error_msg}").format(
                    error_msg=error.message,
                )
            )
    message_send_dict = build_message_send_dict(
        message=message,
        stream=stream,
        local_id=local_id,
        sender_queue_id=sender_queue_id,
        realm=realm,
        widget_content_dict=widget_content_dict,
        email_gateway=email_gateway,
        mention_backend=mention_backend,
        limit_unread_user_ids=limit_unread_user_ids,
    )
    # Wildcard-mention permission can only be checked after rendering.
    if stream is not None and message_send_dict.rendering_result.mentions_wildcard:
        if not wildcard_mention_allowed(sender, stream):
            raise JsonableError(
                _("You do not have permission to use wildcard mentions in this stream.")
            )
    return message_send_dict
def _internal_prep_message(
    realm: Realm,
    sender: UserProfile,
    addressee: Addressee,
    content: str,
    email_gateway: bool = False,
    mention_backend: Optional[MentionBackend] = None,
    limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[SendMessageRequest]:
    """
    Create a message object and checks it, but doesn't send it or save it to the database.
    The internal function that calls this can therefore batch send a bunch of created
    messages together as one database query.
    Call do_send_messages with a list of the return values of this method.

    Returns None (after logging the error) if check_message rejects the
    message, rather than propagating the JsonableError.
    """
    # Truncate overlong content instead of rejecting it.
    # NOTE(review): the 3900 cutoff is hard-coded rather than derived from
    # settings.MAX_MESSAGE_LENGTH; presumably it leaves headroom for the
    # truncation notice -- confirm before changing either value.
    if len(content) > settings.MAX_MESSAGE_LENGTH:
        content = content[0:3900] + "\n\n[message was too long and has been truncated]"
    # If we have a stream name, and the stream doesn't exist, we
    # create it here (though this code path should probably be removed
    # eventually, moving that responsibility to the caller). If
    # addressee.stream_name() is None (i.e. we're sending to a stream
    # by ID), we skip this, as the stream object must already exist.
    if addressee.is_stream():
        stream_name = addressee.stream_name()
        if stream_name is not None:
            ensure_stream(realm, stream_name, acting_user=sender)
    try:
        return check_message(
            sender,
            get_client("Internal"),
            addressee,
            content,
            realm=realm,
            email_gateway=email_gateway,
            mention_backend=mention_backend,
            limit_unread_user_ids=limit_unread_user_ids,
        )
    except JsonableError as e:
        logging.exception(
            "Error queueing internal message by %s: %s",
            sender.delivery_email,
            e.msg,
            stack_info=True,
        )
    return None
def internal_prep_stream_message(
    sender: UserProfile,
    stream: Stream,
    topic: str,
    content: str,
    email_gateway: bool = False,
    limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[SendMessageRequest]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=stream.realm,
        sender=sender,
        addressee=Addressee.for_stream(stream, topic),
        content=content,
        email_gateway=email_gateway,
        limit_unread_user_ids=limit_unread_user_ids,
    )
def internal_prep_stream_message_by_name(
    realm: Realm,
    sender: UserProfile,
    stream_name: str,
    topic: str,
    content: str,
) -> Optional[SendMessageRequest]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_stream_name(stream_name, topic),
        content=content,
    )
def internal_prep_private_message(
    realm: Realm,
    sender: UserProfile,
    recipient_user: UserProfile,
    content: str,
    mention_backend: Optional[MentionBackend] = None,
) -> Optional[SendMessageRequest]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_user_profile(recipient_user),
        content=content,
        mention_backend=mention_backend,
    )
def internal_send_private_message(
    sender: UserProfile, recipient_user: UserProfile, content: str
) -> Optional[int]:
    """Prep and send a private message; return the new message's ID, or
    None if the message failed validation."""
    send_request = internal_prep_private_message(
        recipient_user.realm, sender, recipient_user, content
    )
    if send_request is None:
        return None
    return do_send_messages([send_request])[0]
def internal_send_stream_message(
    sender: UserProfile,
    stream: Stream,
    topic: str,
    content: str,
    email_gateway: bool = False,
    limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[int]:
    """Prep and send a stream message; return the new message's ID, or
    None if the message failed validation."""
    send_request = internal_prep_stream_message(
        sender, stream, topic, content, email_gateway, limit_unread_user_ids=limit_unread_user_ids
    )
    if send_request is None:
        return None
    return do_send_messages([send_request])[0]
def internal_send_stream_message_by_name(
    realm: Realm,
    sender: UserProfile,
    stream_name: str,
    topic: str,
    content: str,
) -> Optional[int]:
    """Prep and send a stream message addressed by stream name; return the
    new message's ID, or None if the message failed validation."""
    send_request = internal_prep_stream_message_by_name(
        realm,
        sender,
        stream_name,
        topic,
        content,
    )
    if send_request is None:
        return None
    return do_send_messages([send_request])[0]
def internal_send_huddle_message(
    realm: Realm, sender: UserProfile, emails: List[str], content: str
) -> Optional[int]:
    """Prep and send a group private message to the given emails; return
    the new message's ID, or None if the message failed validation."""
    send_request = _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_private(emails, realm),
        content=content,
    )
    if send_request is None:
        return None
    return do_send_messages([send_request])[0]
def pick_colors(
    used_colors: Set[str], color_map: Dict[int, str], recipient_ids: List[int]
) -> Dict[int, str]:
    """Assign a stream color to each recipient_id.

    Pre-assigned colors in color_map win; remaining recipients (in sorted
    order) get unused palette colors, and once the palette is exhausted we
    recycle colors deterministically by recipient_id.
    """
    taken = set(used_colors)
    assignment: Dict[int, str] = {}
    needs_color: List[int] = []
    for recipient_id in sorted(recipient_ids):
        if recipient_id in color_map:
            preset = color_map[recipient_id]
            assignment[recipient_id] = preset
            taken.add(preset)
        else:
            needs_color.append(recipient_id)
    fresh = [candidate for candidate in STREAM_ASSIGNMENT_COLORS if candidate not in taken]
    for position, recipient_id in enumerate(needs_color):
        if position < len(fresh):
            assignment[recipient_id] = fresh[position]
        else:
            # We have to start re-using old colors, and we use recipient_id
            # to choose the color.
            assignment[recipient_id] = STREAM_ASSIGNMENT_COLORS[
                recipient_id % len(STREAM_ASSIGNMENT_COLORS)
            ]
    return assignment
def validate_user_access_to_subscribers(
    user_profile: Optional[UserProfile], stream: Stream
) -> None:
    """Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
    * The user and the stream are in different realms
    * The realm is MIT and the stream is not invite only.
    * The stream is invite only, requesting_user is passed, and that user
      does not subscribe to the stream.
    """
    stream_dict = {
        "realm_id": stream.realm_id,
        "is_web_public": stream.is_web_public,
        "invite_only": stream.invite_only,
    }

    # A named closure (instead of an inline lambda) so the subscription
    # database query only runs if the helper actually needs the answer.
    def check_user_subscribed(user_profile: UserProfile) -> bool:
        return subscribed_to_stream(user_profile, stream.id)

    validate_user_access_to_subscribers_helper(user_profile, stream_dict, check_user_subscribed)
def validate_user_access_to_subscribers_helper(
    user_profile: Optional[UserProfile],
    stream_dict: Mapping[str, Any],
    check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
    """Helper for validate_user_access_to_subscribers that doesn't require
    a full stream object. This function is a bit hard to read,
    because it is carefully optimized for performance in the two code
    paths we call it from:

    * In `bulk_get_subscriber_user_ids`, we already know whether the
      user was subscribed via `sub_dict`, and so we want to avoid a
      database query at all (especially since it calls this in a loop);
    * In `validate_user_access_to_subscribers`, we want to only check
      if the user is subscribed when we absolutely have to, since it
      costs a database query.

    The `check_user_subscribed` argument is a function that reports
    whether the user is subscribed to the stream.

    Note also that we raise a ValidationError in cases where the
    caller is doing the wrong thing (maybe these should be
    AssertionErrors), and JsonableError for 400 type errors.

    The checks below are order-sensitive; do not reorder them.
    """
    if user_profile is None:
        raise ValidationError("Missing user to validate access for")
    if user_profile.realm_id != stream_dict["realm_id"]:
        raise ValidationError("Requesting user not in given realm")
    # Even guest users can access subscribers to web-public streams,
    # since they can freely become subscribers to these streams.
    if stream_dict["is_web_public"]:
        return
    # With the exception of web-public streams, a guest must
    # be subscribed to a stream (even a public one) in order
    # to see subscribers.
    if user_profile.is_guest:
        if check_user_subscribed(user_profile):
            return
        # We could explicitly handle the case where guests aren't
        # subscribed here in an `else` statement or we can fall
        # through to the subsequent logic. Tim prefers the latter.
        # Adding an `else` would ensure better code coverage.
    if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
        raise JsonableError(_("Subscriber data is not available for this stream"))
    # Organization administrators can view subscribers for all streams.
    if user_profile.is_realm_admin:
        return
    if stream_dict["invite_only"] and not check_user_subscribed(user_profile):
        raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(
    stream_dicts: Collection[Mapping[str, Any]],
    user_profile: UserProfile,
    subscribed_stream_ids: Set[int],
) -> Dict[int, List[int]]:
    """sub_dict maps stream_id => whether the user is subscribed to that stream.

    Returns a dict mapping every stream id in stream_dicts to the list of
    active subscriber user ids; streams the requesting user may not view
    map to an empty list.
    """
    # Keep only the streams whose subscribers this user may see.
    target_stream_dicts = []
    for stream_dict in stream_dicts:
        stream_id = stream_dict["id"]
        is_subscribed = stream_id in subscribed_stream_ids
        try:
            validate_user_access_to_subscribers_helper(
                user_profile,
                stream_dict,
                lambda user_profile: is_subscribed,
            )
        except JsonableError:
            continue
        target_stream_dicts.append(stream_dict)
    recip_to_stream_id = {stream["recipient_id"]: stream["id"] for stream in target_stream_dicts}
    recipient_ids = sorted(stream["recipient_id"] for stream in target_stream_dicts)
    # Seed with empty lists so inaccessible streams still appear in the result.
    result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
    if not recipient_ids:
        return result
    """
    The raw SQL below leads to more than a 2x speedup when tested with
    20k+ total subscribers. (For large realms with lots of default
    streams, this function deals with LOTS of data, so it is important
    to optimize.)
    """
    query = SQL(
        """
        SELECT
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        FROM
            zerver_subscription
        WHERE
            zerver_subscription.recipient_id in %(recipient_ids)s AND
            zerver_subscription.active AND
            zerver_subscription.is_user_active
        ORDER BY
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        """
    )
    cursor = connection.cursor()
    cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
    rows = cursor.fetchall()
    cursor.close()
    """
    Using groupby/itemgetter here is important for performance, at scale.
    It makes it so that all interpreter overhead is just O(N) in nature.
    """
    # groupby relies on the ORDER BY recipient_id above.
    for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
        user_profile_ids = [r[1] for r in recip_rows]
        stream_id = recip_to_stream_id[recip_id]
        result[stream_id] = list(user_profile_ids)
    return result
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
    # TODO: Make a generic stub for QuerySet
    """Build a query to get the subscribers list for a stream, raising a JsonableError if:

    'realm' is optional in stream.

    The caller can refine this query with select_related(), values(), etc. depending
    on whether it wants objects or just certain fields
    """
    validate_user_access_to_subscribers(requesting_user, stream)
    subscriptions = get_active_subscriptions_for_stream_id(
        stream.id, include_deactivated_users=False
    )
    return subscriptions
def get_subscriber_ids(stream: Stream, requesting_user: Optional[UserProfile] = None) -> List[int]:
    # Return the user_profile_id of each active subscriber of `stream`.
    # NOTE(review): the original annotation said List[str], but
    # values_list("user_profile_id") yields integer ids; strictly this is
    # a lazily-evaluated QuerySet, annotated List[int] for common usage.
    subscriptions_query = get_subscribers_query(stream, requesting_user)
    return subscriptions_query.values_list("user_profile_id", flat=True)
@dataclass
class StreamInfo:
    # Per-stream data shared by every subscription "add" event payload
    # built in send_subscription_add_events.
    email_address: str  # the stream's incoming email-gateway address
    stream_weekly_traffic: Optional[int]  # get_average_weekly_stream_traffic result
    subscribers: List[int]  # subscriber user ids (empty for public zephyr streams)
def send_subscription_add_events(
    realm: Realm,
    sub_info_list: List[SubInfo],
    subscriber_dict: Dict[int, Set[int]],
) -> None:
    """Send one "subscription add" event per affected user, covering all of
    that user's new subscriptions, with per-stream metadata attached."""
    # Group the subscriptions by the user they belong to.
    info_by_user: Dict[int, List[SubInfo]] = defaultdict(list)
    for sub_info in sub_info_list:
        info_by_user[sub_info.user.id].append(sub_info)
    stream_ids = {sub_info.stream.id for sub_info in sub_info_list}
    recent_traffic = get_streams_traffic(stream_ids=stream_ids)
    # We generally only have a few streams, so we compute stream
    # data in its own loop.
    stream_info_dict: Dict[int, StreamInfo] = {}
    for sub_info in sub_info_list:
        stream = sub_info.stream
        if stream.id not in stream_info_dict:
            email_address = encode_email_address(stream, show_sender=True)
            stream_weekly_traffic = get_average_weekly_stream_traffic(
                stream.id, stream.date_created, recent_traffic
            )
            # Public zephyr-realm streams get an empty subscriber list.
            if stream.is_in_zephyr_realm and not stream.invite_only:
                subscribers = []
            else:
                subscribers = list(subscriber_dict[stream.id])
            stream_info_dict[stream.id] = StreamInfo(
                email_address=email_address,
                stream_weekly_traffic=stream_weekly_traffic,
                subscribers=subscribers,
            )
    for user_id, sub_infos in info_by_user.items():
        sub_dicts = []
        for sub_info in sub_infos:
            stream = sub_info.stream
            stream_info = stream_info_dict[stream.id]
            subscription = sub_info.sub
            sub_dict = stream.to_dict()
            for field_name in Subscription.API_FIELDS:
                sub_dict[field_name] = getattr(subscription, field_name)
            sub_dict["in_home_view"] = not subscription.is_muted
            sub_dict["email_address"] = stream_info.email_address
            sub_dict["stream_weekly_traffic"] = stream_info.stream_weekly_traffic
            sub_dict["subscribers"] = stream_info.subscribers
            sub_dicts.append(sub_dict)
        # Send a notification to the user who subscribed.
        event = dict(type="subscription", op="add", subscriptions=sub_dicts)
        send_event(realm, event, [user_id])
# Return type of bulk_add_subscriptions:
# (newly added or reactivated SubInfos, already-subscribed SubInfos).
SubT = Tuple[List[SubInfo], List[SubInfo]]
def bulk_add_subscriptions(
    realm: Realm,
    streams: Collection[Stream],
    users: Iterable[UserProfile],
    color_map: Mapping[str, str] = {},
    from_user_creation: bool = False,
    *,
    acting_user: Optional[UserProfile],
) -> SubT:
    """Subscribe each of `users` to each of `streams`, persisting the
    Subscription rows and audit-log entries and notifying clients.

    `color_map` maps stream name to a preferred color for the new
    subscriptions (it is read-only here, so the mutable-looking default
    is safe).  Returns (new or reactivated SubInfos, already-subscribed
    SubInfos).
    """
    users = list(users)
    user_ids = [user.id for user in users]
    # Sanity check our callers
    for stream in streams:
        assert stream.realm_id == realm.id
    for user in users:
        assert user.realm_id == realm.id
    recipient_ids = [stream.recipient_id for stream in streams]
    recipient_id_to_stream = {stream.recipient_id: stream for stream in streams}
    # Re-key the caller's name-based color preferences by recipient id.
    recipient_color_map = {}
    for stream in streams:
        color: Optional[str] = color_map.get(stream.name, None)
        if color is not None:
            recipient_color_map[stream.recipient_id] = color
    used_colors_for_user_ids: Dict[int, Set[str]] = get_used_colors_for_user_ids(user_ids)
    existing_subs = Subscription.objects.filter(
        user_profile_id__in=user_ids,
        recipient__type=Recipient.STREAM,
        recipient_id__in=recipient_ids,
    )
    subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
    for sub in existing_subs:
        subs_by_user[sub.user_profile_id].append(sub)
    already_subscribed: List[SubInfo] = []
    subs_to_activate: List[SubInfo] = []
    subs_to_add: List[SubInfo] = []
    for user_profile in users:
        my_subs = subs_by_user[user_profile.id]
        # Make a fresh set of all new recipient ids, and then we will
        # remove any for which our user already has a subscription
        # (and we'll re-activate any subscriptions as needed).
        new_recipient_ids: Set[int] = {stream.recipient_id for stream in streams}
        for sub in my_subs:
            if sub.recipient_id in new_recipient_ids:
                new_recipient_ids.remove(sub.recipient_id)
                stream = recipient_id_to_stream[sub.recipient_id]
                sub_info = SubInfo(user_profile, sub, stream)
                if sub.active:
                    already_subscribed.append(sub_info)
                else:
                    subs_to_activate.append(sub_info)
        used_colors = used_colors_for_user_ids.get(user_profile.id, set())
        user_color_map = pick_colors(used_colors, recipient_color_map, list(new_recipient_ids))
        for recipient_id in new_recipient_ids:
            stream = recipient_id_to_stream[recipient_id]
            color = user_color_map[recipient_id]
            sub = Subscription(
                user_profile=user_profile,
                is_user_active=user_profile.is_active,
                active=True,
                color=color,
                recipient_id=recipient_id,
            )
            sub_info = SubInfo(user_profile, sub, stream)
            subs_to_add.append(sub_info)
    # Persist everything (atomically, with RealmAuditLog entries).
    bulk_add_subs_to_db_with_logging(
        realm=realm,
        acting_user=acting_user,
        subs_to_add=subs_to_add,
        subs_to_activate=subs_to_activate,
    )
    altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
    for sub_info in subs_to_add + subs_to_activate:
        altered_user_dict[sub_info.stream.id].add(sub_info.user.id)
    stream_dict = {stream.id: stream for stream in streams}
    new_streams = [stream_dict[stream_id] for stream_id in altered_user_dict]
    subscriber_peer_info = bulk_get_subscriber_peer_info(
        realm=realm,
        streams=new_streams,
    )
    # We now send several types of events to notify browsers.  The
    # first batches of notifications are sent only to the user(s)
    # being subscribed; we can skip these notifications when this is
    # being called from the new user creation flow.
    if not from_user_creation:
        send_stream_creation_events_for_private_streams(
            realm=realm,
            stream_dict=stream_dict,
            altered_user_dict=altered_user_dict,
        )
        send_subscription_add_events(
            realm=realm,
            sub_info_list=subs_to_add + subs_to_activate,
            subscriber_dict=subscriber_peer_info.subscribed_ids,
        )
    send_peer_subscriber_events(
        op="peer_add",
        realm=realm,
        altered_user_dict=altered_user_dict,
        stream_dict=stream_dict,
        private_peer_dict=subscriber_peer_info.private_peer_dict,
    )
    return (
        subs_to_add + subs_to_activate,
        already_subscribed,
    )
# This function contains all the database changes as part of
# subscribing users to streams; we use a transaction to ensure that
# the RealmAuditLog entries are created atomically with the
# Subscription object creation (and updates).
@transaction.atomic(savepoint=False)
def bulk_add_subs_to_db_with_logging(
    realm: Realm,
    acting_user: Optional[UserProfile],
    subs_to_add: List[SubInfo],
    subs_to_activate: List[SubInfo],
) -> None:
    """Persist brand-new and reactivated subscriptions together with their
    RealmAuditLog rows, all inside the enclosing transaction."""
    Subscription.objects.bulk_create(info.sub for info in subs_to_add)
    Subscription.objects.filter(id__in=[info.sub.id for info in subs_to_activate]).update(
        active=True
    )
    # Log subscription activities in RealmAuditLog.  All rows share one
    # timestamp and last-message-id; creations come first, then
    # reactivations, so we can bulk-insert everything in one query.
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = [
        RealmAuditLog(
            realm=realm,
            acting_user=acting_user,
            modified_user=sub_info.user,
            modified_stream=sub_info.stream,
            event_last_message_id=event_last_message_id,
            event_type=event_type,
            event_time=event_time,
        )
        for event_type, sub_infos in (
            (RealmAuditLog.SUBSCRIPTION_CREATED, subs_to_add),
            (RealmAuditLog.SUBSCRIPTION_ACTIVATED, subs_to_activate),
        )
        for sub_info in sub_infos
    ]
    RealmAuditLog.objects.bulk_create(all_subscription_logs)
def send_stream_creation_events_for_private_streams(
    realm: Realm,
    stream_dict: Dict[int, Stream],
    altered_user_dict: Dict[int, Set[int]],
) -> None:
    """Send `create` events for private streams to their newly added users.

    Users newly subscribed to an invite-only stream need the stream to
    exist on their client before the "subscribe" notification arrives.
    Realm admins already have all created private streams, so they are
    excluded from the notification.
    """
    for stream_id, user_ids_for_stream in altered_user_dict.items():
        stream = stream_dict[stream_id]
        if stream.is_public():
            # Public streams are already known to all clients.
            continue
        admin_ids = {admin.id for admin in realm.get_admin_users_and_bots()}
        recipient_ids = list(user_ids_for_stream - admin_ids)
        if recipient_ids:
            send_stream_creation_event(stream, recipient_ids)
def send_peer_subscriber_events(
    op: str,
    realm: Realm,
    stream_dict: Dict[int, Stream],
    altered_user_dict: Dict[int, Set[int]],
    private_peer_dict: Dict[int, Set[int]],
) -> None:
    """Send peer_add/peer_remove events to users tracking subscriber lists.

    For private streams, only the existing subscribers (from
    `private_peer_dict`) are notified; for public streams, every active
    non-guest user is.  Zephyr-mirror public streams are skipped.

    Args:
        op: "peer_add" or "peer_remove".
        altered_user_dict: stream_id -> ids of users (un)subscribed.
        private_peer_dict: stream_id -> existing peer subscriber ids,
            for each private stream appearing in altered_user_dict.
    """
    # Send peer_add/peer_remove events to other users who are tracking the
    # subscribers lists of streams in their browser; everyone for
    # public streams and only existing subscribers for private streams.
    assert op in ["peer_add", "peer_remove"]
    private_stream_ids = [
        stream_id for stream_id in altered_user_dict if stream_dict[stream_id].invite_only
    ]
    for stream_id in private_stream_ids:
        altered_user_ids = altered_user_dict[stream_id]
        # Don't notify the altered users about themselves.
        peer_user_ids = private_peer_dict[stream_id] - altered_user_ids
        if peer_user_ids and altered_user_ids:
            event = dict(
                type="subscription",
                op=op,
                stream_ids=[stream_id],
                # sorted() accepts the set directly; the old
                # sorted(list(...)) made a redundant copy.
                user_ids=sorted(altered_user_ids),
            )
            send_event(realm, event, peer_user_ids)
    public_stream_ids = [
        stream_id
        for stream_id in altered_user_dict
        if not stream_dict[stream_id].invite_only and not stream_dict[stream_id].is_in_zephyr_realm
    ]
    if public_stream_ids:
        user_streams: Dict[int, Set[int]] = defaultdict(set)
        public_peer_ids = set(active_non_guest_user_ids(realm.id))
        for stream_id in public_stream_ids:
            altered_user_ids = altered_user_dict[stream_id]
            peer_user_ids = public_peer_ids - altered_user_ids
            if peer_user_ids and altered_user_ids:
                if len(altered_user_ids) == 1:
                    # If we only have one user, we will try to
                    # find other streams they have (un)subscribed to
                    # (where it's just them). This optimization
                    # typically works when a single user is subscribed
                    # to multiple default public streams during
                    # new-user registration.
                    #
                    # This optimization depends on all public streams
                    # having the same peers for any single user, which
                    # isn't the case for private streams.
                    #
                    # next(iter(...)) grabs the single element without
                    # materializing the set into a list.
                    altered_user_id = next(iter(altered_user_ids))
                    user_streams[altered_user_id].add(stream_id)
                else:
                    event = dict(
                        type="subscription",
                        op=op,
                        stream_ids=[stream_id],
                        user_ids=sorted(altered_user_ids),
                    )
                    send_event(realm, event, peer_user_ids)
        # Flush the single-user batches accumulated above: one event
        # per user covering all of their affected public streams.
        for user_id, stream_ids in user_streams.items():
            peer_user_ids = public_peer_ids - {user_id}
            event = dict(
                type="subscription",
                op=op,
                stream_ids=sorted(stream_ids),
                user_ids=[user_id],
            )
            send_event(realm, event, peer_user_ids)
def send_peer_remove_events(
    realm: Realm,
    streams: List[Stream],
    altered_user_dict: Dict[int, Set[int]],
) -> None:
    """Notify peer subscribers that users were removed from `streams`."""
    invite_only_streams = [s for s in streams if s.invite_only]
    peers_of_private_streams = bulk_get_private_peers(
        realm=realm,
        private_streams=invite_only_streams,
    )
    send_peer_subscriber_events(
        op="peer_remove",
        realm=realm,
        stream_dict={s.id: s for s in streams},
        altered_user_dict=altered_user_dict,
        private_peer_dict=peers_of_private_streams,
    )
def get_available_notification_sounds() -> List[str]:
    """Return the sorted base names of the bundled .ogg notification sounds.

    Names containing a dot (e.g. zulip.abcd1234.ogg, produced by
    production hash-naming) are excluded to avoid spurious duplicates.
    """
    sounds_dir = static_path("audio/notification_sounds")
    sound_names = [
        base
        for base, ext in (os.path.splitext(entry) for entry in os.listdir(sounds_dir))
        if ext == ".ogg" and "." not in base
    ]
    return sorted(sound_names)
def notify_subscriptions_removed(
    realm: Realm, user_profile: UserProfile, streams: Iterable[Stream]
) -> None:
    """Send the user a single `subscription remove` event for `streams`."""
    removed = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
    send_event(
        realm,
        dict(type="subscription", op="remove", subscriptions=removed),
        [user_profile.id],
    )
# Return type of bulk_remove_subscriptions:
# (pairs actually unsubscribed, pairs that had no active subscription).
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(
    realm: Realm,
    users: Iterable[UserProfile],
    streams: Iterable[Stream],
    *,
    acting_user: Optional[UserProfile],
) -> SubAndRemovedT:
    """Unsubscribe each of `users` from each of `streams`.

    Returns (removed, not_subscribed): the (user, stream) pairs that
    were actually unsubscribed, and the pairs skipped because no active
    subscription existed.

    Side effects: deactivates Subscription rows and writes
    RealmAuditLog entries atomically, notifies the affected users and
    their peers, queues marking the streams' messages as read, and
    deactivates private streams that become vacant.
    """
    users = list(users)
    streams = list(streams)
    # Sanity check our callers
    for stream in streams:
        assert stream.realm_id == realm.id
    for user in users:
        assert user.realm_id == realm.id
    stream_dict = {stream.id: stream for stream in streams}
    existing_subs_by_user = get_bulk_stream_subscriber_info(users, streams)

    def get_non_subscribed_subs() -> List[Tuple[UserProfile, Stream]]:
        # Collect (user, stream) pairs with no active subscription;
        # these are reported to the caller rather than treated as an
        # error.
        stream_ids = {stream.id for stream in streams}
        not_subscribed: List[Tuple[UserProfile, Stream]] = []
        for user_profile in users:
            user_sub_stream_info = existing_subs_by_user[user_profile.id]
            subscribed_stream_ids = {sub_info.stream.id for sub_info in user_sub_stream_info}
            not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
            for stream_id in not_subscribed_stream_ids:
                stream = stream_dict[stream_id]
                not_subscribed.append((user_profile, stream))
        return not_subscribed

    not_subscribed = get_non_subscribed_subs()
    subs_to_deactivate: List[SubInfo] = []
    sub_ids_to_deactivate: List[int] = []
    # This loop just flattens out our data into big lists for
    # bulk operations.
    for sub_infos in existing_subs_by_user.values():
        for sub_info in sub_infos:
            subs_to_deactivate.append(sub_info)
            sub_ids_to_deactivate.append(sub_info.sub.id)
    # We do all the database changes in a transaction to ensure
    # RealmAuditLog entries are atomically created when making changes.
    with transaction.atomic():
        # Snapshot occupied streams before/after so we can detect
        # streams that became vacant due to this removal.
        occupied_streams_before = list(get_occupied_streams(realm))
        Subscription.objects.filter(
            id__in=sub_ids_to_deactivate,
        ).update(active=False)
        occupied_streams_after = list(get_occupied_streams(realm))
        # Log subscription activities in RealmAuditLog
        event_time = timezone_now()
        event_last_message_id = get_last_message_id()
        all_subscription_logs = [
            RealmAuditLog(
                realm=sub_info.user.realm,
                acting_user=acting_user,
                modified_user=sub_info.user,
                modified_stream=sub_info.stream,
                event_last_message_id=event_last_message_id,
                event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
                event_time=event_time,
            )
            for sub_info in subs_to_deactivate
        ]
        # Now since we have all log objects generated we can do a bulk insert
        RealmAuditLog.objects.bulk_create(all_subscription_logs)
    # Regroup the flat deactivation list two ways: by stream (for peer
    # events) and by user (for the per-user "remove" notifications).
    altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
    streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
    for sub_info in subs_to_deactivate:
        stream = sub_info.stream
        streams_by_user[sub_info.user.id].append(stream)
        altered_user_dict[stream.id].add(sub_info.user.id)
    for user_profile in users:
        if len(streams_by_user[user_profile.id]) == 0:
            continue
        notify_subscriptions_removed(realm, user_profile, streams_by_user[user_profile.id])
        # Mark the removed streams' messages read asynchronously.
        event = {
            "type": "mark_stream_messages_as_read",
            "user_profile_id": user_profile.id,
            "stream_recipient_ids": [stream.recipient_id for stream in streams],
        }
        queue_json_publish("deferred_work", event)
    send_peer_remove_events(
        realm=realm,
        streams=streams,
        altered_user_dict=altered_user_dict,
    )
    new_vacant_streams = set(occupied_streams_before) - set(occupied_streams_after)
    new_vacant_private_streams = [stream for stream in new_vacant_streams if stream.invite_only]
    if new_vacant_private_streams:
        # Deactivate any newly-vacant private streams
        for stream in new_vacant_private_streams:
            do_deactivate_stream(stream, acting_user=acting_user)
    return (
        [(sub_info.user, sub_info.stream) for sub_info in subs_to_deactivate],
        not_subscribed,
    )
def do_change_subscription_property(
    user_profile: UserProfile,
    sub: Subscription,
    stream: Stream,
    property_name: str,
    value: Any,
    *,
    acting_user: Optional[UserProfile],
) -> None:
    """Change one per-user subscription setting (muting, color, etc.).

    Saves the new value on `sub`, records the change in RealmAuditLog
    (using the database-side property name/value), and sends a
    `subscription update` event to the user (using the API-side name).

    Note the is_muted/in_home_view translation below: the two names
    refer to the same database column with inverted boolean sense.
    """
    database_property_name = property_name
    event_property_name = property_name
    database_value = value
    event_value = value
    # For this property, is_muted is used in the database, but
    # in_home_view in the API, since we haven't migrated the events
    # API to the new name yet.
    if property_name == "in_home_view":
        database_property_name = "is_muted"
        database_value = not value
    if property_name == "is_muted":
        event_property_name = "in_home_view"
        event_value = not value
    old_value = getattr(sub, database_property_name)
    setattr(sub, database_property_name, database_value)
    sub.save(update_fields=[database_property_name])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        event_type=RealmAuditLog.SUBSCRIPTION_PROPERTY_CHANGED,
        event_time=event_time,
        modified_user=user_profile,
        acting_user=acting_user,
        modified_stream=stream,
        extra_data=orjson.dumps(
            {
                RealmAuditLog.OLD_VALUE: old_value,
                RealmAuditLog.NEW_VALUE: database_value,
                "property": database_property_name,
            }
        ).decode(),
    )
    event = dict(
        type="subscription",
        op="update",
        property=event_property_name,
        value=event_value,
        stream_id=stream.id,
    )
    send_event(user_profile.realm, event, [user_profile.id])
def do_change_password(user_profile: UserProfile, password: str, commit: bool = True) -> None:
    """Set a new password for the user and record it in RealmAuditLog.

    With commit=False the new password hash is only set on the
    in-memory object; the caller is responsible for saving it.
    """
    user_profile.set_password(password)
    if commit:
        user_profile.save(update_fields=["password"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
        event_time=timezone_now(),
    )
def do_change_full_name(
    user_profile: UserProfile, full_name: str, acting_user: Optional[UserProfile]
) -> None:
    """Rename the user, record it in RealmAuditLog, and notify clients.

    All active users get a realm_user update; for bots, the bot owners
    additionally receive a realm_bot update.
    """
    previous_name = user_profile.full_name
    user_profile.full_name = full_name
    user_profile.save(update_fields=["full_name"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=acting_user,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
        event_time=timezone_now(),
        extra_data=previous_name,
    )
    person = dict(user_id=user_profile.id, full_name=user_profile.full_name)
    send_event(
        user_profile.realm,
        dict(type="realm_user", op="update", person=person),
        active_user_ids(user_profile.realm_id),
    )
    if user_profile.is_bot:
        send_event(
            user_profile.realm,
            dict(type="realm_bot", op="update", bot=person),
            bot_owner_user_ids(user_profile),
        )
def check_change_full_name(
    user_profile: UserProfile, full_name_raw: str, acting_user: Optional[UserProfile]
) -> str:
    """Validate and apply a proposed full name, returning the cleaned name.

    The caller is responsible for checking permissions.  The returned
    name may differ from what was passed in, because validation strips
    whitespace.
    """
    cleaned_name = check_full_name(full_name_raw)
    do_change_full_name(user_profile, cleaned_name, acting_user)
    return cleaned_name
def check_change_bot_full_name(
    user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile
) -> None:
    """Validate and apply a new full name for a bot.

    A no-op when the validated name equals the current one: the web app
    patches full_name even when the form field was untouched, and we
    silently ignore those requests.  Otherwise verifies the name is not
    already taken by another bot before renaming.
    """
    new_full_name = check_full_name(full_name_raw)
    if new_full_name == user_profile.full_name:
        return
    check_bot_name_available(
        realm_id=user_profile.realm_id,
        full_name=new_full_name,
    )
    do_change_full_name(user_profile, new_full_name, acting_user)
@transaction.atomic(durable=True)
def do_change_bot_owner(
    user_profile: UserProfile, bot_owner: UserProfile, acting_user: UserProfile
) -> None:
    """Transfer ownership of a bot to `bot_owner`.

    Saves the new owner, records the change in RealmAuditLog, and — on
    transaction commit — sends realm_bot delete/add/update events and a
    realm_user update so clients learn the new bot_owner_id.
    """
    previous_owner = user_profile.bot_owner
    user_profile.bot_owner = bot_owner
    user_profile.save()  # Can't use update_fields because of how the foreign key works.
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=acting_user,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
        event_time=event_time,
    )
    update_users = bot_owner_user_ids(user_profile)
    # For admins, update event is sent instead of delete/add
    # event. bot_data of admin contains all the
    # bots and none of them should be removed/(added again).
    # Delete the bot from previous owner's bot data.
    if previous_owner and not previous_owner.is_realm_admin:
        delete_event = dict(
            type="realm_bot",
            op="delete",
            bot=dict(
                user_id=user_profile.id,
            ),
        )
        transaction.on_commit(
            lambda: send_event(
                user_profile.realm,
                delete_event,
                {previous_owner.id},
            )
        )
        # Do not send update event for previous bot owner.
        update_users = update_users - {previous_owner.id}
    # Notify the new owner that the bot has been added.
    if not bot_owner.is_realm_admin:
        add_event = created_bot_event(user_profile)
        transaction.on_commit(lambda: send_event(user_profile.realm, add_event, {bot_owner.id}))
        # Do not send update event for bot_owner.
        update_users = update_users - {bot_owner.id}
    bot_event = dict(
        type="realm_bot",
        op="update",
        bot=dict(
            user_id=user_profile.id,
            owner_id=user_profile.bot_owner.id,
        ),
    )
    transaction.on_commit(
        lambda: send_event(
            user_profile.realm,
            bot_event,
            update_users,
        )
    )
    # Since `bot_owner_id` is included in the user profile dict we need
    # to update the users dict with the new bot owner id
    event = dict(
        type="realm_user",
        op="update",
        person=dict(
            user_id=user_profile.id,
            bot_owner_id=user_profile.bot_owner.id,
        ),
    )
    transaction.on_commit(
        lambda: send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
    )
@transaction.atomic(durable=True)
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
    """Record the user's agreement to a new Terms of Service version."""
    user_profile.tos_version = tos_version
    user_profile.save(update_fields=["tos_version"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_TERMS_OF_SERVICE_VERSION_CHANGED,
        event_time=timezone_now(),
    )
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
    """Rotate the user's API key and return the new key.

    Evicts the cache entry keyed by the old API key, records the
    rotation in RealmAuditLog, notifies bot owners (for bots), and
    queues invalidation of the user's push device tokens.
    """
    old_api_key = user_profile.api_key
    user_profile.api_key = generate_api_key()
    user_profile.save(update_fields=["api_key"])
    # We need to explicitly delete the old API key from our caches,
    # because the on-save handler for flushing the UserProfile object
    # in zerver/lib/cache.py only has access to the new API key.
    cache_delete(user_profile_by_api_key_cache_key(old_api_key))
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=acting_user,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_API_KEY_CHANGED,
        event_time=timezone_now(),
    )
    if user_profile.is_bot:
        send_event(
            user_profile.realm,
            dict(
                type="realm_bot",
                op="update",
                bot=dict(
                    user_id=user_profile.id,
                    api_key=user_profile.api_key,
                ),
            ),
            bot_owner_user_ids(user_profile),
        )
    queue_json_publish(
        "deferred_work",
        {"type": "clear_push_device_tokens", "user_profile_id": user_profile.id},
    )
    return user_profile.api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
    """Broadcast the user's new avatar URLs once the transaction commits.

    All active users get a realm_user update; for bots, the bot owners
    additionally receive a realm_bot update.
    """
    if user_profile.is_bot:
        bot_event = dict(
            type="realm_bot",
            op="update",
            bot=dict(
                user_id=user_profile.id,
                avatar_url=avatar_url(user_profile),
            ),
        )
        transaction.on_commit(
            lambda: send_event(
                user_profile.realm,
                bot_event,
                bot_owner_user_ids(user_profile),
            )
        )
    person = dict(
        avatar_source=user_profile.avatar_source,
        avatar_url=avatar_url(user_profile),
        avatar_url_medium=avatar_url(user_profile, medium=True),
        avatar_version=user_profile.avatar_version,
        # Even clients using client_gravatar don't need the email,
        # since we're sending the URL anyway.
        user_id=user_profile.id,
    )
    user_event = dict(type="realm_user", op="update", person=person)
    transaction.on_commit(
        lambda: send_event(
            user_profile.realm,
            user_event,
            active_user_ids(user_profile.realm_id),
        )
    )
@transaction.atomic(savepoint=False)
def do_change_avatar_fields(
    user_profile: UserProfile,
    avatar_source: str,
    skip_notify: bool = False,
    *,
    acting_user: Optional[UserProfile],
) -> None:
    """Update the user's avatar source, bump its version, and log it.

    Unless skip_notify is set, clients are told about the new avatar
    URL after the enclosing transaction commits.
    """
    user_profile.avatar_source = avatar_source
    user_profile.avatar_version += 1
    user_profile.save(update_fields=["avatar_source", "avatar_version"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
        extra_data={"avatar_source": avatar_source},
        event_time=timezone_now(),
        acting_user=acting_user,
    )
    if not skip_notify:
        notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
    """Reset the user to a gravatar-based avatar and delete the uploaded image."""
    do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user)
    delete_avatar_image(user)
@transaction.atomic(durable=True)
def do_change_icon_source(
    realm: Realm, icon_source: str, *, acting_user: Optional[UserProfile]
) -> None:
    """Change the realm icon source, log it, and notify active users on commit."""
    realm.icon_source = icon_source
    realm.icon_version += 1
    realm.save(update_fields=["icon_source", "icon_version"])
    RealmAuditLog.objects.create(
        realm=realm,
        event_type=RealmAuditLog.REALM_ICON_SOURCE_CHANGED,
        extra_data={"icon_source": icon_source, "icon_version": realm.icon_version},
        event_time=timezone_now(),
        acting_user=acting_user,
    )
    icon_event = dict(
        type="realm",
        op="update_dict",
        property="icon",
        data=dict(icon_source=realm.icon_source, icon_url=realm_icon_url(realm)),
    )
    transaction.on_commit(
        lambda: send_event(
            realm,
            icon_event,
            active_user_ids(realm.id),
        )
    )
@transaction.atomic(durable=True)
def do_change_logo_source(
    realm: Realm, logo_source: str, night: bool, *, acting_user: Optional[UserProfile]
) -> None:
    """Change the realm's day or night logo source, log it, and notify users."""
    if night:
        realm.night_logo_source = logo_source
        realm.night_logo_version += 1
        realm.save(update_fields=["night_logo_source", "night_logo_version"])
    else:
        realm.logo_source = logo_source
        realm.logo_version += 1
        realm.save(update_fields=["logo_source", "logo_version"])
    RealmAuditLog.objects.create(
        event_type=RealmAuditLog.REALM_LOGO_CHANGED,
        realm=realm,
        event_time=timezone_now(),
        acting_user=acting_user,
    )
    logo_event = dict(
        type="realm",
        op="update_dict",
        property="night_logo" if night else "logo",
        data=get_realm_logo_data(realm, night),
    )
    transaction.on_commit(lambda: send_event(realm, logo_event, active_user_ids(realm.id)))
@transaction.atomic(durable=True)
def do_change_realm_org_type(
    realm: Realm,
    org_type: int,
    acting_user: Optional[UserProfile],
) -> None:
    """Change the realm's organization type, logging old and new values."""
    previous_org_type = realm.org_type
    realm.org_type = org_type
    realm.save(update_fields=["org_type"])
    RealmAuditLog.objects.create(
        event_type=RealmAuditLog.REALM_ORG_TYPE_CHANGED,
        realm=realm,
        event_time=timezone_now(),
        acting_user=acting_user,
        extra_data={"old_value": previous_org_type, "new_value": org_type},
    )
@transaction.atomic(savepoint=False)
def do_change_realm_plan_type(
    realm: Realm, plan_type: int, *, acting_user: Optional[UserProfile]
) -> None:
    """Change the realm's billing plan type and apply the plan's limits.

    Records the change in RealmAuditLog, updates the invite, message
    visibility, and upload-quota limits for the new plan, recomputes
    the first visible message id, and notifies active users on commit.

    Raises AssertionError for an unrecognized plan_type.
    """
    old_value = realm.plan_type
    realm.plan_type = plan_type
    realm.save(update_fields=["plan_type"])
    RealmAuditLog.objects.create(
        event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
        realm=realm,
        event_time=timezone_now(),
        acting_user=acting_user,
        extra_data={"old_value": old_value, "new_value": plan_type},
    )
    # PLUS, STANDARD, and STANDARD_FREE currently share identical
    # limits; the former copy-pasted branches are deduplicated here.
    if plan_type in (
        Realm.PLAN_TYPE_PLUS,
        Realm.PLAN_TYPE_STANDARD,
        Realm.PLAN_TYPE_STANDARD_FREE,
    ):
        realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
        realm.message_visibility_limit = None
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
    elif plan_type == Realm.PLAN_TYPE_SELF_HOSTED:
        # Self-hosted realms have no limits at all.
        realm.max_invites = None  # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
        realm.message_visibility_limit = None
        realm.upload_quota_gb = None
    elif plan_type == Realm.PLAN_TYPE_LIMITED:
        realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
        realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
    else:
        raise AssertionError("Invalid plan type")
    update_first_visible_message_id(realm)
    # max_invites is presumably a property setter backed by the
    # _max_invites column (see the type: ignore note above), hence the
    # underscored name in update_fields.
    realm.save(update_fields=["_max_invites", "message_visibility_limit", "upload_quota_gb"])
    event = {
        "type": "realm",
        "op": "update",
        "property": "plan_type",
        "value": plan_type,
        "extra_data": {"upload_quota": realm.upload_quota_bytes()},
    }
    transaction.on_commit(lambda: send_event(realm, event, active_user_ids(realm.id)))
@transaction.atomic(durable=True)
def do_change_default_sending_stream(
    user_profile: UserProfile, stream: Optional[Stream], *, acting_user: Optional[UserProfile]
) -> None:
    """Set (or clear, with stream=None) the user's default sending stream.

    Logs old/new stream ids in RealmAuditLog; for bots, the bot owners
    are notified after the transaction commits.
    """
    old_stream_id = user_profile.default_sending_stream_id
    user_profile.default_sending_stream = stream
    user_profile.save(update_fields=["default_sending_stream"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        event_type=RealmAuditLog.USER_DEFAULT_SENDING_STREAM_CHANGED,
        event_time=timezone_now(),
        modified_user=user_profile,
        acting_user=acting_user,
        extra_data=orjson.dumps(
            {
                RealmAuditLog.OLD_VALUE: old_stream_id,
                RealmAuditLog.NEW_VALUE: None if stream is None else stream.id,
            }
        ).decode(),
    )
    if user_profile.is_bot:
        bot_event = dict(
            type="realm_bot",
            op="update",
            bot=dict(
                user_id=user_profile.id,
                default_sending_stream=stream.name if stream else None,
            ),
        )
        transaction.on_commit(
            lambda: send_event(
                user_profile.realm,
                bot_event,
                bot_owner_user_ids(user_profile),
            )
        )
@transaction.atomic(durable=True)
def do_change_default_events_register_stream(
    user_profile: UserProfile, stream: Optional[Stream], *, acting_user: Optional[UserProfile]
) -> None:
    """Set (or clear, with stream=None) the user's default events-register stream.

    Logs old/new stream ids in RealmAuditLog; for bots, the bot owners
    are notified after the transaction commits.
    """
    old_stream_id = user_profile.default_events_register_stream_id
    user_profile.default_events_register_stream = stream
    user_profile.save(update_fields=["default_events_register_stream"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        event_type=RealmAuditLog.USER_DEFAULT_REGISTER_STREAM_CHANGED,
        event_time=timezone_now(),
        modified_user=user_profile,
        acting_user=acting_user,
        extra_data=orjson.dumps(
            {
                RealmAuditLog.OLD_VALUE: old_stream_id,
                RealmAuditLog.NEW_VALUE: None if stream is None else stream.id,
            }
        ).decode(),
    )
    if user_profile.is_bot:
        bot_event = dict(
            type="realm_bot",
            op="update",
            bot=dict(
                user_id=user_profile.id,
                default_events_register_stream=stream.name if stream else None,
            ),
        )
        transaction.on_commit(
            lambda: send_event(
                user_profile.realm,
                bot_event,
                bot_owner_user_ids(user_profile),
            )
        )
@transaction.atomic(durable=True)
def do_change_default_all_public_streams(
    user_profile: UserProfile, value: bool, *, acting_user: Optional[UserProfile]
) -> None:
    """Toggle whether the user (typically a bot) follows all public streams.

    Logs old/new values in RealmAuditLog; for bots, the bot owners are
    notified after the transaction commits.
    """
    previous_value = user_profile.default_all_public_streams
    user_profile.default_all_public_streams = value
    user_profile.save(update_fields=["default_all_public_streams"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        event_type=RealmAuditLog.USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED,
        event_time=timezone_now(),
        modified_user=user_profile,
        acting_user=acting_user,
        extra_data=orjson.dumps(
            {
                RealmAuditLog.OLD_VALUE: previous_value,
                RealmAuditLog.NEW_VALUE: value,
            }
        ).decode(),
    )
    if user_profile.is_bot:
        bot_event = dict(
            type="realm_bot",
            op="update",
            bot=dict(
                user_id=user_profile.id,
                default_all_public_streams=user_profile.default_all_public_streams,
            ),
        )
        transaction.on_commit(
            lambda: send_event(
                user_profile.realm,
                bot_event,
                bot_owner_user_ids(user_profile),
            )
        )
@transaction.atomic(durable=True)
def do_change_user_role(
    user_profile: UserProfile, value: int, *, acting_user: Optional[UserProfile]
) -> None:
    """Change the user's role, log it (with per-role counts), and notify.

    All active users receive a realm_user update after commit.
    """
    previous_role = user_profile.role
    user_profile.role = value
    user_profile.save(update_fields=["role"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        modified_user=user_profile,
        acting_user=acting_user,
        event_type=RealmAuditLog.USER_ROLE_CHANGED,
        event_time=timezone_now(),
        extra_data=orjson.dumps(
            {
                RealmAuditLog.OLD_VALUE: previous_role,
                RealmAuditLog.NEW_VALUE: value,
                RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
            }
        ).decode(),
    )
    role_event = dict(
        type="realm_user",
        op="update",
        person=dict(user_id=user_profile.id, role=user_profile.role),
    )
    transaction.on_commit(
        lambda: send_event(user_profile.realm, role_event, active_user_ids(user_profile.realm_id))
    )
def do_make_user_billing_admin(user_profile: UserProfile) -> None:
    """Grant the user billing-admin rights and notify all active users."""
    user_profile.is_billing_admin = True
    user_profile.save(update_fields=["is_billing_admin"])
    send_event(
        user_profile.realm,
        dict(
            type="realm_user",
            op="update",
            person=dict(user_id=user_profile.id, is_billing_admin=True),
        ),
        active_user_ids(user_profile.realm_id),
    )
def do_change_can_forge_sender(user_profile: UserProfile, value: bool) -> None:
    """Set the user's can_forge_sender permission flag.

    Note: unlike most setters in this file, no RealmAuditLog entry or
    client event is generated.
    """
    user_profile.can_forge_sender = value
    user_profile.save(update_fields=["can_forge_sender"])
def do_change_can_create_users(user_profile: UserProfile, value: bool) -> None:
    """Set the user's can_create_users permission flag.

    Note: unlike most setters in this file, no RealmAuditLog entry or
    client event is generated.
    """
    user_profile.can_create_users = value
    user_profile.save(update_fields=["can_create_users"])
def send_change_stream_permission_notification(
    stream: Stream,
    *,
    old_policy_name: str,
    new_policy_name: str,
    acting_user: UserProfile,
) -> None:
    """Post a notification-bot message announcing the stream's new access policy.

    The message is rendered in the realm's default language.
    """
    sender = get_system_bot(settings.NOTIFICATION_BOT, acting_user.realm_id)
    user_mention = silent_mention_syntax_for_user(acting_user)
    with override_language(stream.realm.default_language):
        template = _(
            "{user} changed the [access permissions](/help/stream-permissions) "
            "for this stream from **{old_policy}** to **{new_policy}**."
        )
        message = template.format(
            user=user_mention,
            old_policy=old_policy_name,
            new_policy=new_policy_name,
        )
        internal_send_stream_message(
            sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, message
        )
def do_change_stream_permission(
    stream: Stream,
    *,
    invite_only: Optional[bool] = None,
    history_public_to_subscribers: Optional[bool] = None,
    is_web_public: Optional[bool] = None,
    acting_user: UserProfile,
) -> None:
    """Change the stream's access policy (private/public/web-public).

    Saves the three policy flags atomically with per-flag RealmAuditLog
    entries for whichever flags actually changed, sends a stream update
    event to users who can access the stream, and posts a
    notification-bot message describing the old and new policies.
    """
    old_invite_only_value = stream.invite_only
    old_history_public_to_subscribers_value = stream.history_public_to_subscribers
    old_is_web_public_value = stream.is_web_public
    # A note on these assertions: It's possible we'd be better off
    # making all callers of this function pass the full set of
    # parameters, rather than having default values. Doing so would
    # allow us to remove the messy logic below, where we sometimes
    # ignore the passed parameters.
    #
    # But absent such a refactoring, it's important to assert that
    # we're not requesting an unsupported configuration.
    if is_web_public:
        assert history_public_to_subscribers is not False
        assert invite_only is not True
        stream.is_web_public = True
        stream.invite_only = False
        stream.history_public_to_subscribers = True
    else:
        assert invite_only is not None
        # is_web_public is falsey
        history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
            stream.realm,
            invite_only,
            history_public_to_subscribers,
        )
        stream.invite_only = invite_only
        stream.history_public_to_subscribers = history_public_to_subscribers
        stream.is_web_public = False
    with transaction.atomic():
        stream.save(update_fields=["invite_only", "history_public_to_subscribers", "is_web_public"])
        event_time = timezone_now()
        # One audit entry per flag that actually changed.
        if old_invite_only_value != stream.invite_only:
            RealmAuditLog.objects.create(
                realm=stream.realm,
                acting_user=acting_user,
                modified_stream=stream,
                event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
                event_time=event_time,
                extra_data=orjson.dumps(
                    {
                        RealmAuditLog.OLD_VALUE: old_invite_only_value,
                        RealmAuditLog.NEW_VALUE: stream.invite_only,
                        "property": "invite_only",
                    }
                ).decode(),
            )
        if old_history_public_to_subscribers_value != stream.history_public_to_subscribers:
            RealmAuditLog.objects.create(
                realm=stream.realm,
                acting_user=acting_user,
                modified_stream=stream,
                event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
                event_time=event_time,
                extra_data=orjson.dumps(
                    {
                        RealmAuditLog.OLD_VALUE: old_history_public_to_subscribers_value,
                        RealmAuditLog.NEW_VALUE: stream.history_public_to_subscribers,
                        "property": "history_public_to_subscribers",
                    }
                ).decode(),
            )
        if old_is_web_public_value != stream.is_web_public:
            RealmAuditLog.objects.create(
                realm=stream.realm,
                acting_user=acting_user,
                modified_stream=stream,
                event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
                event_time=event_time,
                extra_data=orjson.dumps(
                    {
                        RealmAuditLog.OLD_VALUE: old_is_web_public_value,
                        RealmAuditLog.NEW_VALUE: stream.is_web_public,
                        "property": "is_web_public",
                    }
                ).decode(),
            )
    event = dict(
        op="update",
        type="stream",
        property="invite_only",
        value=stream.invite_only,
        history_public_to_subscribers=stream.history_public_to_subscribers,
        is_web_public=stream.is_web_public,
        stream_id=stream.id,
        name=stream.name,
    )
    send_event(stream.realm, event, can_access_stream_user_ids(stream))
    old_policy_name = get_stream_permission_policy_name(
        invite_only=old_invite_only_value,
        history_public_to_subscribers=old_history_public_to_subscribers_value,
        is_web_public=old_is_web_public_value,
    )
    new_policy_name = get_stream_permission_policy_name(
        invite_only=stream.invite_only,
        history_public_to_subscribers=stream.history_public_to_subscribers,
        is_web_public=stream.is_web_public,
    )
    send_change_stream_permission_notification(
        stream,
        old_policy_name=old_policy_name,
        new_policy_name=new_policy_name,
        acting_user=acting_user,
    )
def send_change_stream_post_policy_notification(
    stream: Stream, *, old_post_policy: int, new_post_policy: int, acting_user: UserProfile
) -> None:
    """Post a notification-bot message announcing the stream's new posting policy.

    The message is rendered in the realm's default language.
    """
    sender = get_system_bot(settings.NOTIFICATION_BOT, acting_user.realm_id)
    user_mention = silent_mention_syntax_for_user(acting_user)
    with override_language(stream.realm.default_language):
        template = _(
            "{user} changed the [posting permissions](/help/stream-sending-policy) "
            "for this stream:\n\n"
            "* **Old permissions**: {old_policy}.\n"
            "* **New permissions**: {new_policy}.\n"
        )
        message = template.format(
            user=user_mention,
            old_policy=Stream.POST_POLICIES[old_post_policy],
            new_policy=Stream.POST_POLICIES[new_post_policy],
        )
        internal_send_stream_message(
            sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, message
        )
def do_change_stream_post_policy(
    stream: Stream, stream_post_policy: int, *, acting_user: UserProfile
) -> None:
    """Change who may post to the stream, log it, and notify clients.

    Sends the canonical `stream_post_policy` update event, a duplicate
    legacy `is_announcement_only` event for old mobile clients, and a
    notification-bot message in the stream.
    """
    old_post_policy = stream.stream_post_policy
    with transaction.atomic():
        stream.stream_post_policy = stream_post_policy
        stream.save(update_fields=["stream_post_policy"])
        RealmAuditLog.objects.create(
            realm=stream.realm,
            acting_user=acting_user,
            modified_stream=stream,
            event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
            event_time=timezone_now(),
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: old_post_policy,
                    RealmAuditLog.NEW_VALUE: stream_post_policy,
                    "property": "stream_post_policy",
                }
            ).decode(),
        )
    event = dict(
        op="update",
        type="stream",
        property="stream_post_policy",
        value=stream_post_policy,
        stream_id=stream.id,
        name=stream.name,
    )
    send_event(stream.realm, event, can_access_stream_user_ids(stream))
    # Backwards-compatibility code: We removed the
    # is_announcement_only property in early 2020, but we send a
    # duplicate event for legacy mobile clients that might want the
    # data.
    event = dict(
        op="update",
        type="stream",
        property="is_announcement_only",
        value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
        stream_id=stream.id,
        name=stream.name,
    )
    send_event(stream.realm, event, can_access_stream_user_ids(stream))
    send_change_stream_post_policy_notification(
        stream,
        old_post_policy=old_post_policy,
        new_post_policy=stream_post_policy,
        acting_user=acting_user,
    )
def do_rename_stream(stream: Stream, new_name: str, user_profile: UserProfile) -> Dict[str, str]:
    """Rename `stream` to `new_name`.

    Saves the new name with an audit-log row, repairs the various caches
    keyed on the stream name, notifies clients of the new name and the
    new email-forwarding address, and announces the rename in the stream.

    Returns a dict containing the stream's new "email_address".
    """
    old_name = stream.name
    stream.name = new_name
    stream.save(update_fields=["name"])
    RealmAuditLog.objects.create(
        realm=stream.realm,
        acting_user=user_profile,
        modified_stream=stream,
        event_type=RealmAuditLog.STREAM_NAME_CHANGED,
        event_time=timezone_now(),
        extra_data=orjson.dumps(
            {
                RealmAuditLog.OLD_VALUE: old_name,
                RealmAuditLog.NEW_VALUE: new_name,
            }
        ).decode(),
    )
    recipient_id = stream.recipient_id
    messages = Message.objects.filter(recipient_id=recipient_id).only("id")
    # Update the display recipient and stream, which are easy single
    # items to set.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
    # The keys can match if the rename only changes case/formatting that
    # the cache key normalizes away; then the old entry is simply replaced.
    if old_cache_key != new_cache_key:
        cache_delete(old_cache_key)
        cache_set(new_cache_key, stream)
    cache_set(display_recipient_cache_key(recipient_id), stream.name)
    # Delete cache entries for everything else, which is cheaper and
    # clearer than trying to set them. display_recipient is the out of
    # date field in all cases.
    cache_delete_many(to_dict_cache_key_id(message.id) for message in messages)
    new_email = encode_email_address(stream, show_sender=True)
    # We will tell our users to essentially
    # update stream.name = new_name where name = old_name
    # and update stream.email = new_email where name = old_name.
    # We could optimize this by trying to send one message, but the
    # client code really wants one property update at a time, and
    # updating stream names is a pretty infrequent operation.
    # More importantly, we want to key these updates by id, not name,
    # since id is the immutable primary key, and obviously name is not.
    data_updates = [
        ["email_address", new_email],
        ["name", new_name],
    ]
    for property, value in data_updates:
        event = dict(
            op="update",
            type="stream",
            property=property,
            value=value,
            stream_id=stream.id,
            name=old_name,
        )
        send_event(stream.realm, event, can_access_stream_user_ids(stream))
    sender = get_system_bot(settings.NOTIFICATION_BOT, stream.realm_id)
    with override_language(stream.realm.default_language):
        internal_send_stream_message(
            sender,
            stream,
            Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
            _("{user_name} renamed stream {old_stream_name} to {new_stream_name}.").format(
                user_name=silent_mention_syntax_for_user(user_profile),
                old_stream_name=f"**{old_name}**",
                new_stream_name=f"**{new_name}**",
            ),
        )
    # Even though the token doesn't change, the web client needs to update the
    # email forwarding address to display the correctly-escaped new name.
    return {"email_address": new_email}
def send_change_stream_description_notification(
    stream: Stream, *, old_description: str, new_description: str, acting_user: UserProfile
) -> None:
    """Announce a stream description change in the stream itself, quoting
    both the old and the new description."""
    sender = get_system_bot(settings.NOTIFICATION_BOT, acting_user.realm_id)
    user_mention = silent_mention_syntax_for_user(acting_user)
    with override_language(stream.realm.default_language):
        template = _(
            "{user} changed the description for this stream.\n\n"
            "* **Old description:**\n"
            "``` quote\n"
            "{old_description}\n"
            "```\n"
            "* **New description:**\n"
            "``` quote\n"
            "{new_description}\n"
            "```"
        )
        notification = template.format(
            user=user_mention,
            old_description=old_description,
            new_description=new_description,
        )
        internal_send_stream_message(
            sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, notification
        )
def do_change_stream_description(
    stream: Stream, new_description: str, *, acting_user: UserProfile
) -> None:
    """Set a stream's description (and its rendered form), write an
    audit-log row, notify clients, and post an in-stream notification."""
    old_description = stream.description
    with transaction.atomic():
        stream.description = new_description
        stream.rendered_description = render_stream_description(new_description)
        stream.save(update_fields=["description", "rendered_description"])
        audit_extra_data = orjson.dumps(
            {
                RealmAuditLog.OLD_VALUE: old_description,
                RealmAuditLog.NEW_VALUE: new_description,
                "property": "description",
            }
        ).decode()
        RealmAuditLog.objects.create(
            realm=stream.realm,
            acting_user=acting_user,
            modified_stream=stream,
            event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
            event_time=timezone_now(),
            extra_data=audit_extra_data,
        )
    stream_update_event = dict(
        type="stream",
        op="update",
        property="description",
        name=stream.name,
        stream_id=stream.id,
        value=new_description,
        rendered_description=stream.rendered_description,
    )
    send_event(stream.realm, stream_update_event, can_access_stream_user_ids(stream))
    send_change_stream_description_notification(
        stream,
        old_description=old_description,
        new_description=new_description,
        acting_user=acting_user,
    )
def send_change_stream_message_retention_days_notification(
    user_profile: UserProfile, stream: Stream, old_value: Optional[int], new_value: Optional[int]
) -> None:
    """Post an in-stream notification describing a change to the stream's
    message retention policy.

    A value of None means "use the realm-level default", so None is
    resolved to the realm setting before being described to users.
    """
    sender = get_system_bot(settings.NOTIFICATION_BOT, user_profile.realm_id)
    user_mention = silent_mention_syntax_for_user(user_profile)
    # If switching from or to the organization's default retention policy,
    # we want to take the realm's default into account.
    if old_value is None:
        old_value = stream.realm.message_retention_days
    if new_value is None:
        new_value = stream.realm.message_retention_days
    with override_language(stream.realm.default_language):
        # Bug fix: the "{number} days" and summary strings were previously
        # plain f-strings, so they were never translated even though they
        # are rendered under override_language like the rest of the text.
        if old_value == Stream.MESSAGE_RETENTION_SPECIAL_VALUES_MAP["unlimited"]:
            old_retention_period = _("Forever")
            new_retention_period = _("{number} days").format(number=new_value)
            summary_line = _(
                "Messages in this stream will now be automatically deleted {number} days after they are sent."
            ).format(number=new_value)
        elif new_value == Stream.MESSAGE_RETENTION_SPECIAL_VALUES_MAP["unlimited"]:
            old_retention_period = _("{number} days").format(number=old_value)
            new_retention_period = _("Forever")
            summary_line = _("Messages in this stream will now be retained forever.")
        else:
            old_retention_period = _("{number} days").format(number=old_value)
            new_retention_period = _("{number} days").format(number=new_value)
            summary_line = _(
                "Messages in this stream will now be automatically deleted {number} days after they are sent."
            ).format(number=new_value)
        notification_string = _(
            "{user} has changed the [message retention period](/help/message-retention-policy) for this stream:\n"
            "* **Old retention period**: {old_retention_period}\n"
            "* **New retention period**: {new_retention_period}\n\n"
            "{summary_line}"
        )
        notification_string = notification_string.format(
            user=user_mention,
            old_retention_period=old_retention_period,
            new_retention_period=new_retention_period,
            summary_line=summary_line,
        )
        internal_send_stream_message(
            sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, notification_string
        )
def do_change_stream_message_retention_days(
    stream: Stream, acting_user: UserProfile, message_retention_days: Optional[int] = None
) -> None:
    """Set the stream's message retention policy (None means "use the
    realm default"), audit-log the change, notify clients, and announce
    the change in the stream itself."""
    previous_value = stream.message_retention_days
    with transaction.atomic():
        stream.message_retention_days = message_retention_days
        stream.save(update_fields=["message_retention_days"])
        RealmAuditLog.objects.create(
            realm=stream.realm,
            acting_user=acting_user,
            modified_stream=stream,
            event_type=RealmAuditLog.STREAM_MESSAGE_RETENTION_DAYS_CHANGED,
            event_time=timezone_now(),
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: previous_value,
                    RealmAuditLog.NEW_VALUE: message_retention_days,
                }
            ).decode(),
        )
    retention_event = dict(
        op="update",
        type="stream",
        property="message_retention_days",
        value=message_retention_days,
        stream_id=stream.id,
        name=stream.name,
    )
    send_event(stream.realm, retention_event, can_access_stream_user_ids(stream))
    send_change_stream_message_retention_days_notification(
        user_profile=acting_user,
        stream=stream,
        old_value=previous_value,
        new_value=message_retention_days,
    )
def set_realm_permissions_based_on_org_type(realm: Realm) -> None:
    """This function implements overrides for the default configuration
    for new organizations when the administrator selected specific
    organization types.

    This substantially simplifies our /help/ advice for folks setting
    up new organizations of these types.

    Note: only mutates `realm`; the caller is responsible for saving.
    """
    # Custom configuration for educational organizations. The present
    # defaults are designed for a single class, not a department or
    # larger institution, since those are more common.
    education_org_types = (
        Realm.ORG_TYPES["education_nonprofit"]["id"],
        Realm.ORG_TYPES["education"]["id"],
    )
    if realm.org_type in education_org_types:
        # Limit email address visibility and user creation to administrators.
        realm.email_address_visibility = Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS
        realm.invite_to_realm_policy = Realm.POLICY_ADMINS_ONLY
        # Restrict public stream creation to staff, but allow private
        # streams (useful for study groups, etc.).
        realm.create_public_stream_policy = Realm.POLICY_ADMINS_ONLY
        # Don't allow members (students) to manage user groups or
        # stream subscriptions.
        realm.user_group_edit_policy = Realm.POLICY_MODERATORS_ONLY
        realm.invite_to_stream_policy = Realm.POLICY_MODERATORS_ONLY
        # Allow moderators (TAs?) to move topics between streams.
        realm.move_messages_between_streams_policy = Realm.POLICY_MODERATORS_ONLY
def do_create_realm(
    string_id: str,
    name: str,
    *,
    emails_restricted_to_domains: Optional[bool] = None,
    email_address_visibility: Optional[int] = None,
    description: Optional[str] = None,
    invite_required: Optional[bool] = None,
    plan_type: Optional[int] = None,
    org_type: Optional[int] = None,
    date_created: Optional[datetime.datetime] = None,
    is_demo_organization: Optional[bool] = False,
    enable_spectator_access: Optional[bool] = False,
) -> Realm:
    """Create and return a new realm (organization).

    Creates the Realm row (applying org-type-based permission overrides),
    an audit-log entry and per-realm user defaults in one transaction,
    then the initial notifications/signup streams, and finally announces
    the signup in the admin realm's signups stream if that stream exists.

    Raises AssertionError for a reserved or already-taken string_id.
    """
    if string_id == settings.SOCIAL_AUTH_SUBDOMAIN:
        raise AssertionError("Creating a realm on SOCIAL_AUTH_SUBDOMAIN is not allowed!")
    if Realm.objects.filter(string_id=string_id).exists():
        raise AssertionError(f"Realm {string_id} already exists!")
    if not server_initialized():
        logging.info("Server not yet initialized. Creating the internal realm first.")
        create_internal_realm()
    # Forward only the optional settings the caller actually specified, so
    # that the Realm model's defaults apply for everything else.
    kwargs: Dict[str, Any] = {}
    if emails_restricted_to_domains is not None:
        kwargs["emails_restricted_to_domains"] = emails_restricted_to_domains
    if email_address_visibility is not None:
        kwargs["email_address_visibility"] = email_address_visibility
    if description is not None:
        kwargs["description"] = description
    if invite_required is not None:
        kwargs["invite_required"] = invite_required
    if plan_type is not None:
        kwargs["plan_type"] = plan_type
    if org_type is not None:
        kwargs["org_type"] = org_type
    if enable_spectator_access is not None:
        kwargs["enable_spectator_access"] = enable_spectator_access
    if date_created is not None:
        # The date_created parameter is intended only for use by test
        # suites that want to backdate the date of a realm's creation.
        assert not settings.PRODUCTION
        kwargs["date_created"] = date_created
    with transaction.atomic():
        realm = Realm(string_id=string_id, name=name, **kwargs)
        if is_demo_organization:
            realm.demo_organization_scheduled_deletion_date = (
                realm.date_created + datetime.timedelta(days=settings.DEMO_ORG_DEADLINE_DAYS)
            )
        set_realm_permissions_based_on_org_type(realm)
        realm.save()
        RealmAuditLog.objects.create(
            realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created
        )
        RealmUserDefault.objects.create(realm=realm)
    # Create stream once Realm object has been saved
    notifications_stream = ensure_stream(
        realm,
        Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
        stream_description="Everyone is added to this stream by default. Welcome! :octopus:",
        acting_user=None,
    )
    realm.notifications_stream = notifications_stream
    # With the current initial streams situation, the only public
    # stream is the notifications_stream.
    DefaultStream.objects.create(stream=notifications_stream, realm=realm)
    signup_notifications_stream = ensure_stream(
        realm,
        Realm.INITIAL_PRIVATE_STREAM_NAME,
        invite_only=True,
        stream_description="A private stream for core team members.",
        acting_user=None,
    )
    realm.signup_notifications_stream = signup_notifications_stream
    realm.save(update_fields=["notifications_stream", "signup_notifications_stream"])
    if plan_type is None and settings.BILLING_ENABLED:
        do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
    admin_realm = get_realm(settings.SYSTEM_BOT_REALM)
    sender = get_system_bot(settings.NOTIFICATION_BOT, admin_realm.id)
    # Send a notification to the admin realm
    signup_message = _("Signups enabled")
    try:
        signups_stream = get_signups_stream(admin_realm)
        topic = realm.display_subdomain
        internal_send_stream_message(
            sender,
            signups_stream,
            topic,
            signup_message,
        )
    except Stream.DoesNotExist:  # nocoverage
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it to send to it; just do nothing.
        pass
    return realm
def update_scheduled_email_notifications_time(
    user_profile: UserProfile, old_batching_period: int, new_batching_period: int
) -> None:
    """Shift the user's queued message-notification emails to honor a new
    email batching period (both periods are in seconds)."""
    # A pending email was scheduled at (event time + old period); moving
    # it to (event time + new period) means shifting by the difference.
    shift = datetime.timedelta(seconds=new_batching_period) - datetime.timedelta(
        seconds=old_batching_period
    )
    ScheduledMessageNotificationEmail.objects.filter(user_profile=user_profile).update(
        scheduled_timestamp=F("scheduled_timestamp") + shift
    )
@transaction.atomic(durable=True)
def do_change_user_setting(
    user_profile: UserProfile,
    setting_name: str,
    setting_value: Union[bool, str, int],
    *,
    acting_user: Optional[UserProfile],
) -> None:
    """Change one personal setting for `user_profile` and notify clients.

    Validates the value's type (time zones are canonicalized instead),
    saves the field, audit-logs notification settings, applies setting
    specific side effects (clearing digest emails, rescheduling batched
    notification emails, deleting synced drafts), and sends both the
    modern "user_settings" event and the pre-feature-level-89 legacy
    events.  Time zone changes are additionally broadcast realm-wide.
    """
    old_value = getattr(user_profile, setting_name)
    event_time = timezone_now()
    if setting_name == "timezone":
        assert isinstance(setting_value, str)
        setting_value = canonicalize_timezone(setting_value)
    else:
        property_type = UserProfile.property_types[setting_name]
        assert isinstance(setting_value, property_type)
    setattr(user_profile, setting_name, setting_value)
    # TODO: Move these database actions into a transaction.atomic block.
    user_profile.save(update_fields=[setting_name])
    if setting_name in UserProfile.notification_setting_types:
        # Prior to all personal settings being managed by property_types,
        # these were only created for notification settings.
        #
        # TODO: Start creating these for all settings, and do a
        # backfilled=True migration.
        RealmAuditLog.objects.create(
            realm=user_profile.realm,
            event_type=RealmAuditLog.USER_SETTING_CHANGED,
            event_time=event_time,
            acting_user=acting_user,
            modified_user=user_profile,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: old_value,
                    RealmAuditLog.NEW_VALUE: setting_value,
                    "property": setting_name,
                }
            ).decode(),
        )
    # Disabling digest emails should clear a user's email queue
    if setting_name == "enable_digest_emails" and not setting_value:
        clear_scheduled_emails(user_profile.id, ScheduledEmail.DIGEST)
    if setting_name == "email_notifications_batching_period_seconds":
        assert isinstance(old_value, int)
        assert isinstance(setting_value, int)
        update_scheduled_email_notifications_time(user_profile, old_value, setting_value)
    event = {
        "type": "user_settings",
        "op": "update",
        "property": setting_name,
        "value": setting_value,
    }
    if setting_name == "default_language":
        assert isinstance(setting_value, str)
        event["language_name"] = get_language_name(setting_value)
    # Events are deferred to on_commit so clients never observe a change
    # that this durable transaction later rolls back.
    transaction.on_commit(lambda: send_event(user_profile.realm, event, [user_profile.id]))
    if setting_name in UserProfile.notification_settings_legacy:
        # This legacy event format is for backwards-compatibility with
        # clients that don't support the new user_settings event type.
        # We only send this for settings added before Feature level 89.
        legacy_event = {
            "type": "update_global_notifications",
            "user": user_profile.email,
            "notification_name": setting_name,
            "setting": setting_value,
        }
        transaction.on_commit(
            lambda: send_event(user_profile.realm, legacy_event, [user_profile.id])
        )
    if setting_name in UserProfile.display_settings_legacy or setting_name == "timezone":
        # This legacy event format is for backwards-compatibility with
        # clients that don't support the new user_settings event type.
        # We only send this for settings added before Feature level 89.
        legacy_event = {
            "type": "update_display_settings",
            "user": user_profile.email,
            "setting_name": setting_name,
            "setting": setting_value,
        }
        if setting_name == "default_language":
            assert isinstance(setting_value, str)
            legacy_event["language_name"] = get_language_name(setting_value)
        transaction.on_commit(
            lambda: send_event(user_profile.realm, legacy_event, [user_profile.id])
        )
    # Updates to the time zone display setting are sent to all users
    if setting_name == "timezone":
        payload = dict(
            email=user_profile.email,
            user_id=user_profile.id,
            timezone=canonicalize_timezone(user_profile.timezone),
        )
        timezone_event = dict(type="realm_user", op="update", person=payload)
        transaction.on_commit(
            lambda: send_event(
                user_profile.realm,
                timezone_event,
                active_user_ids(user_profile.realm_id),
            )
        )
    if setting_name == "enable_drafts_synchronization" and setting_value is False:
        # Delete all of the drafts from the backend but don't send delete events
        # for them since all that's happened is that we stopped syncing changes,
        # not deleted every previously synced draft - to do that use the DELETE
        # endpoint.
        Draft.objects.filter(user_profile=user_profile).delete()
def lookup_default_stream_groups(
    default_stream_group_names: List[str], realm: Realm
) -> List[DefaultStreamGroup]:
    """Resolve group names to DefaultStreamGroup rows for this realm.

    Raises JsonableError on the first name with no matching group.
    """
    groups = []
    for group_name in default_stream_group_names:
        try:
            groups.append(DefaultStreamGroup.objects.get(name=group_name, realm=realm))
        except DefaultStreamGroup.DoesNotExist:
            raise JsonableError(_("Invalid default stream group {}").format(group_name))
    return groups
def notify_default_streams(realm: Realm) -> None:
    """Broadcast the realm's current default streams to non-guest users."""
    default_streams = get_default_streams_for_realm(realm.id)
    event = dict(
        type="default_streams",
        default_streams=streams_to_dicts_sorted(default_streams),
    )
    transaction.on_commit(lambda: send_event(realm, event, active_non_guest_user_ids(realm.id)))
def notify_default_stream_groups(realm: Realm) -> None:
    """Broadcast the realm's default stream groups to non-guest users."""
    groups = get_default_stream_groups(realm)
    event = dict(
        type="default_stream_groups",
        default_stream_groups=default_stream_groups_to_dicts_sorted(groups),
    )
    transaction.on_commit(lambda: send_event(realm, event, active_non_guest_user_ids(realm.id)))
def do_add_default_stream(stream: Stream) -> None:
    """Add `stream` to its realm's default streams (idempotent) and
    notify clients when the set actually changed.

    Uses get_or_create instead of the previous .exists()-then-.create()
    sequence, which had a TOCTOU race: two concurrent calls could both
    see "not exists" and one would then crash on the unique constraint.
    """
    (row, created) = DefaultStream.objects.get_or_create(
        realm_id=stream.realm_id, stream_id=stream.id
    )
    if created:
        notify_default_streams(stream.realm)
@transaction.atomic(savepoint=False)
def do_remove_default_stream(stream: Stream) -> None:
    """Remove `stream` from the realm's default streams (a no-op delete
    if it wasn't one) and notify clients."""
    DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).delete()
    notify_default_streams(stream.realm)
def do_create_default_stream_group(
    realm: Realm, group_name: str, description: str, streams: List[Stream]
) -> None:
    """Create a new default stream group containing `streams`.

    Raises JsonableError if any stream is already a realm-wide default
    stream, if the group name is invalid, or if the group already exists.
    """
    realm_default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in realm_default_streams:
            raise JsonableError(
                _(
                    "'{stream_name}' is a default stream and cannot be added to '{group_name}'",
                ).format(stream_name=stream.name, group_name=group_name)
            )
    check_default_stream_group_name(group_name)
    (group, created) = DefaultStreamGroup.objects.get_or_create(
        name=group_name, realm=realm, description=description
    )
    if not created:
        raise JsonableError(
            _(
                "Default stream group '{group_name}' already exists",
            ).format(group_name=group_name)
        )
    group.streams.set(streams)
    notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(
    realm: Realm, group: DefaultStreamGroup, streams: List[Stream]
) -> None:
    """Add `streams` to an existing default stream group.

    Raises JsonableError if a stream is a realm-wide default stream or is
    already in the group.  Membership is re-queried per stream on purpose,
    since each add changes group.streams.
    """
    realm_default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in realm_default_streams:
            raise JsonableError(
                _(
                    "'{stream_name}' is a default stream and cannot be added to '{group_name}'",
                ).format(stream_name=stream.name, group_name=group.name)
            )
        if stream in group.streams.all():
            raise JsonableError(
                _(
                    "Stream '{stream_name}' is already present in default stream group '{group_name}'",
                ).format(stream_name=stream.name, group_name=group.name)
            )
        group.streams.add(stream)
    group.save()
    notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(
    realm: Realm, group: DefaultStreamGroup, streams: List[Stream]
) -> None:
    """Remove `streams` from a default stream group, erroring on any
    stream that is not currently a member."""
    for stream in streams:
        if stream in group.streams.all():
            group.streams.remove(stream)
            continue
        raise JsonableError(
            _(
                "Stream '{stream_name}' is not present in default stream group '{group_name}'",
            ).format(stream_name=stream.name, group_name=group.name)
        )
    group.save()
    notify_default_stream_groups(realm)
def do_change_default_stream_group_name(
    realm: Realm, group: DefaultStreamGroup, new_group_name: str
) -> None:
    """Rename a default stream group, rejecting no-op and duplicate names."""
    if new_group_name == group.name:
        raise JsonableError(
            _("This default stream group is already named '{}'").format(new_group_name)
        )
    if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
        raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))
    group.name = new_group_name
    group.save()
    notify_default_stream_groups(realm)
def do_change_default_stream_group_description(
    realm: Realm, group: DefaultStreamGroup, new_description: str
) -> None:
    """Update a default stream group's description and notify clients."""
    group.description = new_description
    group.save()
    notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
    """Delete a default stream group and notify clients."""
    group.delete()
    notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
    """Return the streams configured as defaults for the given realm."""
    rows = DefaultStream.objects.select_related().filter(realm_id=realm_id)
    return [row.stream for row in rows]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
    """Return the default streams a new user in this realm is subscribed to.

    Right now default streams are realm-wide; this wrapper gives us
    flexibility to some day further customize defaults per user.
    """
    return get_default_streams_for_realm(user_profile.realm_id)
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
    """Return the streams as JSON-serializable dicts, sorted by name."""
    dicts = [stream.to_dict() for stream in streams]
    dicts.sort(key=lambda elt: elt["name"])
    return dicts
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
    """Return the groups as JSON-serializable dicts, sorted by name."""
    dicts = [group.to_dict() for group in groups]
    dicts.sort(key=lambda elt: elt["name"])
    return dicts
def do_update_user_activity_interval(
    user_profile: UserProfile, log_time: datetime.datetime
) -> None:
    """Record activity at `log_time` by extending the user's most recent
    activity interval when they overlap, or creating a new interval."""
    effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
    # This code isn't perfect, because with various races we might end
    # up creating two overlapping intervals, but that shouldn't happen
    # often, and can be corrected for in post-processing
    try:
        last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
        # Two intervals overlap iff each interval ends after the other
        # begins. In this case, we just extend the old interval to
        # include the new interval.
        if log_time <= last.end and effective_end >= last.start:
            last.end = max(last.end, effective_end)
            last.start = min(last.start, log_time)
            last.save(update_fields=["start", "end"])
            return
    except IndexError:
        # No intervals exist for this user yet; fall through and create one.
        pass
    # Otherwise, the intervals don't overlap, so we should make a new one
    UserActivityInterval.objects.create(
        user_profile=user_profile, start=log_time, end=effective_end
    )
@statsd_increment("user_activity")
def do_update_user_activity(
    user_profile_id: int, client_id: int, query: str, count: int, log_time: datetime.datetime
) -> None:
    """Record `count` uses of API `query` by (user, client), updating the
    last-visit timestamp and the cumulative count."""
    (activity, created) = UserActivity.objects.get_or_create(
        user_profile_id=user_profile_id,
        client_id=client_id,
        query=query,
        defaults={"last_visit": log_time, "count": count},
    )
    if created:
        # A brand-new row already carries the right count and timestamp.
        return
    activity.count += count
    activity.last_visit = log_time
    activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
    """Immediately broadcast a presence update to all active users in the
    realm, unless the realm is large enough that we skip live updates."""
    # Most presence data is sent to clients in the main presence
    # endpoint in response to the user's own presence; this results
    # data that is 1-2 minutes stale for who is online. The flaw with
    # this plan is when a user comes back online and then immediately
    # sends a message, recipients may still see that user as offline!
    # We solve that by sending an immediate presence update clients.
    #
    # See https://zulip.readthedocs.io/en/latest/subsystems/presence.html for
    # internals documentation on presence.
    user_ids = active_user_ids(user_profile.realm_id)
    if len(user_ids) > settings.USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS:
        # These immediate presence generate quadratic work for Tornado
        # (linear number of users in each event and the frequency of
        # users coming online grows linearly with userbase too). In
        # organizations with thousands of users, this can overload
        # Tornado, especially if much of the realm comes online at the
        # same time.
        #
        # The utility of these live-presence updates goes down as
        # organizations get bigger (since one is much less likely to
        # be paying attention to the sidebar); so beyond a limit, we
        # stop sending them at all.
        return
    presence_dict = presence.to_dict()
    event = dict(
        type="presence",
        email=user_profile.email,
        user_id=user_profile.id,
        server_timestamp=time.time(),
        presence={presence_dict["client"]: presence_dict},
    )
    send_event(user_profile.realm, event, user_ids)
def consolidate_client(client: Client) -> Client:
    """Map client aliases onto their canonical Client.

    The web app reports a client as 'website'; the desktop app reports
    ZulipDesktop due to its custom user agent.  We want both to count as
    web users, so ZulipDesktop is aliased to website.
    """
    if client.name == "ZulipDesktop":
        return get_client("website")
    return client
@statsd_increment("user_presence")
def do_update_user_presence(
    user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int
) -> None:
    """Record a presence update for (user, client), and broadcast it when
    the user newly came online in a realm with presence enabled."""
    client = consolidate_client(client)
    # defaults only apply when no row for (user, client) exists yet.
    defaults = dict(
        timestamp=log_time,
        status=status,
        realm_id=user_profile.realm_id,
    )
    (presence, created) = UserPresence.objects.get_or_create(
        user_profile=user_profile,
        client=client,
        defaults=defaults,
    )
    # "Stale" here means the last stored update is over 70 seconds old.
    stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
    was_idle = presence.status == UserPresence.IDLE
    became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
    # If an object was created, it has already been saved.
    #
    # We suppress changes from ACTIVE to IDLE before stale_status is reached;
    # this protects us from the user having two clients open: one active, the
    # other idle. Without this check, we would constantly toggle their status
    # between the two states.
    #
    # NOTE(review): `and` binds tighter than `or`, so this condition parses
    # as ((not created and stale_status) or was_idle or status unchanged);
    # confirm the `not created` guard isn't meant to cover all three clauses.
    if not created and stale_status or was_idle or status == presence.status:
        # The following block attempts to only update the "status"
        # field in the event that it actually changed. This is
        # important to avoid flushing the UserPresence cache when the
        # data it would return to a client hasn't actually changed
        # (see the UserPresence post_save hook for details).
        presence.timestamp = log_time
        update_fields = ["timestamp"]
        if presence.status != status:
            presence.status = status
            update_fields.append("status")
        presence.save(update_fields=update_fields)
    if not user_profile.realm.presence_disabled and (created or became_online):
        send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
    """Queue an event recording that the user was active at `log_time`."""
    queue_json_publish(
        "user_activity_interval",
        {"user_profile_id": user_profile.id, "time": datetime_to_timestamp(log_time)},
    )
def update_user_presence(
    user_profile: UserProfile,
    client: Client,
    log_time: datetime.datetime,
    status: int,
    new_user_input: bool,
) -> None:
    """Queue a presence update; when triggered by actual user input, also
    record an activity interval."""
    presence_event = {
        "user_profile_id": user_profile.id,
        "status": status,
        "time": datetime_to_timestamp(log_time),
        "client": client.name,
    }
    queue_json_publish("user_presence", presence_event)
    if new_user_input:
        update_user_activity_interval(user_profile, log_time)
def do_update_user_status(
    user_profile: UserProfile,
    away: Optional[bool],
    status_text: Optional[str],
    client_id: int,
    emoji_name: Optional[str],
    emoji_code: Optional[str],
    reaction_type: Optional[str],
) -> None:
    """Update the user's status (away flag, status text, status emoji) and
    notify active users in the realm.

    None for a field means "leave it unchanged"; unchanged fields are
    omitted from the client event.
    """
    if away is None:
        status = None
    else:
        status = UserStatus.AWAY if away else UserStatus.NORMAL
    realm = user_profile.realm
    update_user_status(
        user_profile_id=user_profile.id,
        status=status,
        status_text=status_text,
        client_id=client_id,
        emoji_name=emoji_name,
        emoji_code=emoji_code,
        reaction_type=reaction_type,
    )
    event = dict(
        type="user_status",
        user_id=user_profile.id,
    )
    if away is not None:
        event["away"] = away
    if status_text is not None:
        event["status_text"] = status_text
    if emoji_name is not None:
        # The emoji fields travel together: code and reaction_type are
        # only meaningful alongside a name.
        event["emoji_name"] = emoji_name
        event["emoji_code"] = emoji_code
        event["reaction_type"] = reaction_type
    send_event(realm, event, active_user_ids(realm.id))
@dataclass
class ReadMessagesEvent:
    """Payload (serialized with asdict()) for an "update_message_flags"
    event that adds the "read" flag to messages."""

    messages: List[int]
    all: bool
    type: str = field(default="update_message_flags", init=False)
    op: str = field(default="add", init=False)
    # NOTE(review): "operation" looks like a legacy duplicate of "op" kept
    # for older clients — confirm before removing.
    operation: str = field(default="add", init=False)
    flag: str = field(default="read", init=False)
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
    """Mark the user's entire message history as read ("bankruptcy").

    Returns the number of messages whose unread flag was cleared.
    """
    log_statsd_event("bankruptcy")
    # First, we clear mobile push notifications. This is safer in the
    # event that the below logic times out and we're killed.
    # The slice caps the ids handled at 10,000 — presumably to bound the
    # work done here; confirm whether remainders are handled elsewhere.
    all_push_message_ids = (
        UserMessage.objects.filter(
            user_profile=user_profile,
        )
        .extra(
            where=[UserMessage.where_active_push_notification()],
        )
        .values_list("message_id", flat=True)[0:10000]
    )
    do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids)
    msgs = UserMessage.objects.filter(user_profile=user_profile).extra(
        where=[UserMessage.where_unread()],
    )
    # Clear the unread flag in bulk with a single UPDATE.
    count = msgs.update(
        flags=F("flags").bitor(UserMessage.flags.read),
    )
    event = asdict(
        ReadMessagesEvent(
            messages=[],  # we don't send messages, since the client reloads anyway
            all=True,
        )
    )
    event_time = timezone_now()
    send_event(user_profile.realm, event, [user_profile.id])
    do_increment_logging_stat(
        user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
    )
    # Count at most one "interaction" for this bulk operation.
    do_increment_logging_stat(
        user_profile,
        COUNT_STATS["messages_read_interactions::hour"],
        None,
        event_time,
        increment=min(1, count),
    )
    return count
def do_mark_stream_messages_as_read(
    user_profile: UserProfile, stream_recipient_id: int, topic_name: Optional[str] = None
) -> int:
    """Mark the user's unread messages in a stream (optionally narrowed
    to one topic) as read.

    Returns the number of messages updated; also clears any active mobile
    push notifications for them and bumps read-count stats.
    """
    log_statsd_event("mark_stream_as_read")
    msgs = UserMessage.objects.filter(
        user_profile=user_profile,
    )
    msgs = msgs.filter(message__recipient_id=stream_recipient_id)
    if topic_name:
        msgs = filter_by_topic_name_via_message(
            query=msgs,
            topic_name=topic_name,
        )
    msgs = msgs.extra(
        where=[UserMessage.where_unread()],
    )
    # Capture the affected ids before the UPDATE clears the unread flag.
    message_ids = list(msgs.values_list("message_id", flat=True))
    count = msgs.update(
        flags=F("flags").bitor(UserMessage.flags.read),
    )
    event = asdict(
        ReadMessagesEvent(
            messages=message_ids,
            all=False,
        )
    )
    event_time = timezone_now()
    send_event(user_profile.realm, event, [user_profile.id])
    do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
    do_increment_logging_stat(
        user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
    )
    # Count at most one "interaction" for this bulk operation.
    do_increment_logging_stat(
        user_profile,
        COUNT_STATS["messages_read_interactions::hour"],
        None,
        event_time,
        increment=min(1, count),
    )
    return count
def do_mark_muted_user_messages_as_read(
    user_profile: UserProfile,
    muted_user: UserProfile,
) -> int:
    """Mark every unread message the user has from `muted_user` as read.

    Returns the number of messages updated; also clears any active mobile
    push notifications for them and bumps read-count stats.
    """
    unread_msgs = UserMessage.objects.filter(
        user_profile=user_profile, message__sender=muted_user
    ).extra(where=[UserMessage.where_unread()])
    # Capture the affected ids before the UPDATE clears the unread flag.
    message_ids = list(unread_msgs.values_list("message_id", flat=True))
    count = unread_msgs.update(
        flags=F("flags").bitor(UserMessage.flags.read),
    )
    event = asdict(ReadMessagesEvent(messages=message_ids, all=False))
    event_time = timezone_now()
    send_event(user_profile.realm, event, [user_profile.id])
    do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
    do_increment_logging_stat(
        user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
    )
    # Count at most one "interaction" for this bulk operation.
    do_increment_logging_stat(
        user_profile,
        COUNT_STATS["messages_read_interactions::hour"],
        None,
        event_time,
        increment=min(1, count),
    )
    return count
def do_update_mobile_push_notification(
    message: Message,
    prior_mention_user_ids: Set[int],
    mentions_user_ids: Set[int],
    stream_push_user_ids: Set[int],
) -> None:
    """Withdraw mobile push notifications for users who were mentioned in
    a message but are no longer mentioned after an edit (see #15428).

    A perfect implementation would also support updating the message in a
    sent notification if a message was edited to mention a group rather
    than a user (or vice versa), though it is likely not worth the effort
    to do such a change.
    """
    # Only stream messages are handled here.
    if not message.is_stream_message():
        return
    users_to_clear = prior_mention_user_ids - mentions_user_ids - stream_push_user_ids
    do_clear_mobile_push_notifications_for_ids(list(users_to_clear), [message.id])
def do_clear_mobile_push_notifications_for_ids(
    user_profile_ids: List[int], message_ids: List[int]
) -> None:
    """Queue "remove" mobile-push events for the given users/messages.

    Only UserMessage rows with an active push notification are
    considered. Clearing for several users at once is supported only
    in the single-message (message-edit) use case.
    """
    if not message_ids:
        return
    assert len(user_profile_ids) == 1 or len(message_ids) == 1
    rows = (
        UserMessage.objects.filter(
            message_id__in=message_ids,
            user_profile_id__in=user_profile_ids,
        )
        .extra(
            where=[UserMessage.where_active_push_notification()],
        )
        .values_list("user_profile_id", "message_id")
    )
    ids_to_clear = defaultdict(list)
    for user_id, message_id in rows:
        ids_to_clear[user_id].append(message_id)
    for user_id, cleared_message_ids in ids_to_clear.items():
        queue_json_publish(
            "missedmessage_mobile_notifications",
            {
                "type": "remove",
                "user_profile_id": user_id,
                "message_ids": cleared_message_ids,
            },
        )
def do_update_message_flags(
    user_profile: UserProfile, client: Client, operation: str, flag: str, messages: List[int]
) -> int:
    """Add or remove a message flag (e.g. "read", "starred") on the
    given message ids for user_profile, notify the user's clients, and
    (when marking as read) clear push notifications and record stats.

    Args:
        user_profile: the user whose flags are being changed.
        client: the requesting client.
        operation: "add" or "remove".
        flag: the UserMessage flag name; must be API-editable.
        messages: ids of the messages to change.

    Returns the number of UserMessage rows updated.

    Raises:
        JsonableError: for an invalid or non-editable flag, an invalid
            operation, or inaccessible messages.
    """
    valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS]
    if flag not in valid_flags:
        raise JsonableError(_("Invalid flag: '{}'").format(flag))
    if flag in UserMessage.NON_EDITABLE_FLAGS:
        raise JsonableError(_("Flag not editable: '{}'").format(flag))
    if operation not in ("add", "remove"):
        raise JsonableError(_("Invalid message flag operation: '{}'").format(operation))
    flagattr = getattr(UserMessage.flags, flag)
    msgs = UserMessage.objects.filter(user_profile=user_profile, message_id__in=messages)
    # This next block allows you to star any message, even those you
    # didn't receive (e.g. because you're looking at a public stream
    # you're not subscribed to, etc.). The problem is that starring
    # is a flag boolean on UserMessage, and UserMessage rows are
    # normally created only when you receive a message to support
    # searching your personal history. So we need to create one. We
    # add UserMessage.flags.historical, so that features that need
    # "messages you actually received" can exclude these UserMessages.
    if msgs.count() == 0:
        if not len(messages) == 1:
            raise JsonableError(_("Invalid message(s)"))
        if flag != "starred":
            raise JsonableError(_("Invalid message(s)"))
        # Validate that the user could have read the relevant message
        message = access_message(user_profile, messages[0])[0]
        # OK, this is a message that you legitimately have access
        # to via narrowing to the stream it is on, even though you
        # didn't actually receive it. So we create a historical,
        # read UserMessage message row for you to star.
        UserMessage.objects.create(
            user_profile=user_profile,
            message=message,
            flags=UserMessage.flags.historical | UserMessage.flags.read,
        )
    if operation == "add":
        count = msgs.update(flags=F("flags").bitor(flagattr))
    elif operation == "remove":
        count = msgs.update(flags=F("flags").bitand(~flagattr))
    event = {
        "type": "update_message_flags",
        "op": operation,
        # "operation" duplicates "op"; presumably kept for backwards
        # compatibility with older clients — TODO confirm before removing.
        "operation": operation,
        "flag": flag,
        "messages": messages,
        "all": False,
    }
    send_event(user_profile.realm, event, [user_profile.id])
    if flag == "read" and operation == "add":
        event_time = timezone_now()
        do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
        do_increment_logging_stat(
            user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
        )
        # At most one "read interaction" is counted per API call.
        do_increment_logging_stat(
            user_profile,
            COUNT_STATS["messages_read_interactions::hour"],
            None,
            event_time,
            increment=min(1, count),
        )
    return count
class MessageUpdateUserInfoResult(TypedDict):
    # Users with a non-historical UserMessage row for the message
    # (i.e. users who actually received it).
    message_user_ids: Set[int]
    # Subset of message_user_ids whose row carries the mentioned or
    # wildcard_mentioned flag.
    mention_user_ids: Set[int]
def maybe_send_resolve_topic_notifications(
    *,
    user_profile: UserProfile,
    stream: Stream,
    old_topic: str,
    new_topic: str,
    changed_messages: List[Message],
) -> None:
    """If a topic edit toggled the topic's "resolved" state (added or
    removed RESOLVED_TOPIC_PREFIX), post a notification-bot message to
    the topic announcing who resolved/unresolved it.

    The notification is kept unread only for users who sent or reacted
    to the moved messages.
    """
    # Note that topics will have already been stripped in check_update_message.
    #
    # This logic is designed to treat removing a weird "✔ ✔✔ "
    # prefix as unresolving the topic.
    if old_topic.lstrip(RESOLVED_TOPIC_PREFIX) != new_topic.lstrip(RESOLVED_TOPIC_PREFIX):
        return
    topic_resolved: bool = new_topic.startswith(RESOLVED_TOPIC_PREFIX) and not old_topic.startswith(
        RESOLVED_TOPIC_PREFIX
    )
    topic_unresolved: bool = old_topic.startswith(
        RESOLVED_TOPIC_PREFIX
    ) and not new_topic.startswith(RESOLVED_TOPIC_PREFIX)
    if not topic_resolved and not topic_unresolved:
        # If there's some other weird topic that does not toggle the
        # state of "topic starts with RESOLVED_TOPIC_PREFIX", we do
        # nothing. Any other logic could result in cases where we send
        # these notifications in a non-alternating fashion.
        #
        # Note that it is still possible for an individual topic to
        # have multiple "This topic was marked as resolved"
        # notifications in a row: one can send new messages to the
        # pre-resolve topic and then resolve the topic created that
        # way to get multiple in the resolved topic. And then an
        # administrator can delete the messages in between. We consider
        # this to be a fundamental risk of irresponsible message
        # deletion, not a bug with the "resolve topics" feature.
        return
    # Compute the users who either sent or reacted to messages that
    # were moved via the "resolve topic' action. Only those users
    # should be eligible for this message being managed as unread.
    affected_participant_ids = (set(message.sender_id for message in changed_messages)) | set(
        Reaction.objects.filter(message__in=changed_messages).values_list(
            "user_profile_id", flat=True
        )
    )
    sender = get_system_bot(settings.NOTIFICATION_BOT, user_profile.realm_id)
    user_mention = silent_mention_syntax_for_user(user_profile)
    with override_language(stream.realm.default_language):
        if topic_resolved:
            notification_string = _("{user} has marked this topic as resolved.")
        elif topic_unresolved:
            notification_string = _("{user} has marked this topic as unresolved.")
        internal_send_stream_message(
            sender,
            stream,
            new_topic,
            notification_string.format(
                user=user_mention,
            ),
            limit_unread_user_ids=affected_participant_ids,
        )
def send_message_moved_breadcrumbs(
    user_profile: UserProfile,
    old_stream: Stream,
    old_topic: str,
    old_thread_notification_string: Optional[str],
    new_stream: Stream,
    new_topic: Optional[str],
    new_thread_notification_string: Optional[str],
    changed_messages_count: int,
) -> None:
    """Post notification-bot "tombstone" messages in the old and/or new
    location after content is moved between streams, since such moves
    are highly disruptive.

    Passing None for either notification string skips the
    corresponding breadcrumb; a None new_topic means the topic name
    was unchanged.
    """
    notification_bot = get_system_bot(settings.NOTIFICATION_BOT, old_stream.realm_id)
    if new_topic is None:
        new_topic = old_topic
    acting_user_mention = silent_mention_syntax_for_user(user_profile)
    source_topic_link = f"#**{old_stream.name}>{old_topic}**"
    target_topic_link = f"#**{new_stream.name}>{new_topic}**"
    if new_thread_notification_string is not None:
        # Tell the destination where the messages came from.
        with override_language(new_stream.realm.default_language):
            internal_send_stream_message(
                notification_bot,
                new_stream,
                new_topic,
                new_thread_notification_string.format(
                    old_location=source_topic_link,
                    user=acting_user_mention,
                    changed_messages_count=changed_messages_count,
                ),
            )
    if old_thread_notification_string is not None:
        # Tell the origin where the messages went.
        with override_language(old_stream.realm.default_language):
            internal_send_stream_message(
                notification_bot,
                old_stream,
                old_topic,
                old_thread_notification_string.format(
                    user=acting_user_mention,
                    new_location=target_topic_link,
                    changed_messages_count=changed_messages_count,
                ),
            )
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
    """Return the ids of users who received the given message, plus the
    subset of them who were mentioned in it.

    Rows with the `historical` flag are excluded: those users did not
    receive the message originally, so they are probably not relevant
    for reprocessed alert_words, mentions and similar rendering
    features. (This is a decision we may change in the future.)
    """
    rows = list(
        UserMessage.objects.filter(
            message=message_id,
            flags=~UserMessage.flags.historical,
        ).values("user_profile_id", "flags")
    )
    mention_mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
    message_user_ids = set()
    mention_user_ids = set()
    for row in rows:
        user_id = row["user_profile_id"]
        message_user_ids.add(user_id)
        if int(row["flags"]) & mention_mask:
            mention_user_ids.add(user_id)
    return dict(
        message_user_ids=message_user_ids,
        mention_user_ids=mention_user_ids,
    )
def update_user_message_flags(
    rendering_result: MessageRenderingResult, ums: Iterable[UserMessage]
) -> None:
    """Sync the has_alert_word, mentioned, and wildcard_mentioned flags
    on the given UserMessage rows with a fresh rendering of the
    message, saving only the rows whose flags actually changed."""
    wildcard_mentioned = rendering_result.mentions_wildcard
    mentioned_user_ids = rendering_result.mentions_user_ids
    alert_word_user_ids = rendering_result.user_ids_with_alert_words
    dirty_rows: Set[UserMessage] = set()

    def sync_flag(um: UserMessage, desired: bool, flag: int) -> None:
        # Flip the bit only when it differs from the desired state,
        # remembering the row so it gets saved below.
        currently_set = bool(um.flags & flag)
        if desired and not currently_set:
            um.flags |= flag
            dirty_rows.add(um)
        elif currently_set and not desired:
            um.flags &= ~flag
            dirty_rows.add(um)

    for um in ums:
        sync_flag(um, um.user_profile_id in alert_word_user_ids, UserMessage.flags.has_alert_word)
        sync_flag(um, um.user_profile_id in mentioned_user_ids, UserMessage.flags.mentioned)
        sync_flag(um, wildcard_mentioned, UserMessage.flags.wildcard_mentioned)

    for um in dirty_rows:
        um.save(update_fields=["flags"])
def update_to_dict_cache(
    changed_messages: List[Message], realm_id: Optional[int] = None
) -> List[int]:
    """Refresh the to_dict cache entries (used for serving messages)
    for the given messages; return the ids of the updated messages."""
    fresh_dicts = MessageDict.to_dict_uncached(changed_messages, realm_id)
    new_cache_entries = {}
    updated_ids = []
    for msg_id, rendered in fresh_dicts.items():
        updated_ids.append(msg_id)
        new_cache_entries[to_dict_cache_key_id(msg_id)] = (rendered,)
    cache_set_many(new_cache_entries)
    return updated_ids
def do_update_embedded_data(
    user_profile: UserProfile,
    message: Message,
    content: Optional[str],
    rendering_result: MessageRenderingResult,
) -> None:
    """Update a message's rendered content in place without treating it
    as a user edit (rendering_only=True), e.g. when embed/preview data
    becomes available after the message was sent.

    Saves the new rendering, refreshes the to_dict cache, and sends an
    update_message event to everyone with a UserMessage row.
    """
    timestamp = timezone_now()
    event: Dict[str, Any] = {
        "type": "update_message",
        # No user performed an edit; this is a server-side
        # re-rendering, so user_id is None.
        "user_id": None,
        "edit_timestamp": datetime_to_timestamp(timestamp),
        "message_id": message.id,
        "rendering_only": True,
    }
    changed_messages = [message]
    rendered_content: Optional[str] = None
    ums = UserMessage.objects.filter(message=message.id)
    if content is not None:
        update_user_message_flags(rendering_result, ums)
        rendered_content = rendering_result.rendered_content
        message.rendered_content = rendered_content
        message.rendered_content_version = markdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content
        message.save(update_fields=["content", "rendered_content"])
    event["message_ids"] = update_to_dict_cache(changed_messages)

    def user_info(um: UserMessage) -> Dict[str, Any]:
        # Per-recipient event payload: id plus their message flags.
        return {
            "id": um.user_profile_id,
            "flags": um.flags_list(),
        }

    send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
    # total=False because the fields are filled in incrementally by
    # callers; topic and stream_id are present only for stream-message
    # deletions (see do_delete_messages / do_update_message).
    type: str
    message_ids: List[int]
    message_type: str
    topic: str
    stream_id: int
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic(savepoint=False)
def do_update_message(
    user_profile: UserProfile,
    target_message: Message,
    new_stream: Optional[Stream],
    topic_name: Optional[str],
    propagate_mode: str,
    send_notification_to_old_thread: bool,
    send_notification_to_new_thread: bool,
    content: Optional[str],
    rendering_result: Optional[MessageRenderingResult],
    prior_mention_user_ids: Set[int],
    mention_data: Optional[MentionData] = None,
) -> int:
    """
    The main function for message editing. A message edit event can
    modify:
    * the message's content (in which case the caller will have
      set both content and rendered_content),
    * the topic, in which case the caller will have set topic_name
    * the stream, in which case the caller will have set new_stream
    * or a combination of topic and stream

    With topic edits, propagate_mode determines whether other messages
    also have their topics edited.

    Returns the number of messages changed (the target message plus
    any messages moved along with it via propagate_mode).
    """
    timestamp = timezone_now()
    target_message.last_edit_time = timestamp
    event: Dict[str, Any] = {
        "type": "update_message",
        "user_id": user_profile.id,
        "edit_timestamp": datetime_to_timestamp(timestamp),
        "message_id": target_message.id,
        "rendering_only": False,
    }
    edit_history_event: Dict[str, Any] = {
        "user_id": user_profile.id,
        "timestamp": event["edit_timestamp"],
    }
    changed_messages = [target_message]
    realm = user_profile.realm
    stream_being_edited = None
    if target_message.is_stream_message():
        stream_id = target_message.recipient.type_id
        stream_being_edited = get_stream_by_id_in_realm(stream_id, realm)
        event["stream_name"] = stream_being_edited.name
        event["stream_id"] = stream_being_edited.id
    ums = UserMessage.objects.filter(message=target_message.id)
    if content is not None:
        assert rendering_result is not None
        # mention_data is required if there's a content edit.
        assert mention_data is not None
        # add data from group mentions to mentions_user_ids.
        for group_id in rendering_result.mentions_user_group_ids:
            members = mention_data.get_group_members(group_id)
            rendering_result.mentions_user_ids.update(members)
        update_user_message_flags(rendering_result, ums)
        # One could imagine checking realm.allow_edit_history here and
        # modifying the events based on that setting, but doing so
        # doesn't really make sense.  We need to send the edit event
        # to clients regardless, and a client already had access to
        # the original/pre-edit content of the message anyway. That
        # setting must be enforced on the client side, and making a
        # change here simply complicates the logic for clients parsing
        # edit history events.
        event["orig_content"] = target_message.content
        event["orig_rendered_content"] = target_message.rendered_content
        edit_history_event["prev_content"] = target_message.content
        edit_history_event["prev_rendered_content"] = target_message.rendered_content
        edit_history_event[
            "prev_rendered_content_version"
        ] = target_message.rendered_content_version
        target_message.content = content
        target_message.rendered_content = rendering_result.rendered_content
        target_message.rendered_content_version = markdown_version
        event["content"] = content
        event["rendered_content"] = rendering_result.rendered_content
        event["prev_rendered_content_version"] = target_message.rendered_content_version
        event["is_me_message"] = Message.is_status_message(
            content, rendering_result.rendered_content
        )
        # target_message.has_image and target_message.has_link will have been
        # already updated by Markdown rendering in the caller.
        target_message.has_attachment = check_attachment_reference_change(
            target_message, rendering_result
        )
        if target_message.is_stream_message():
            if topic_name is not None:
                new_topic_name = topic_name
            else:
                new_topic_name = target_message.topic_name()
            stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
                stream_id=stream_id,
                topic_name=new_topic_name,
            )
        else:
            stream_topic = None
        info = get_recipient_info(
            realm_id=realm.id,
            recipient=target_message.recipient,
            sender_id=target_message.sender_id,
            stream_topic=stream_topic,
            possible_wildcard_mention=mention_data.message_has_wildcards(),
        )
        event["online_push_user_ids"] = list(info["online_push_user_ids"])
        event["pm_mention_push_disabled_user_ids"] = list(info["pm_mention_push_disabled_user_ids"])
        event["pm_mention_email_disabled_user_ids"] = list(
            info["pm_mention_email_disabled_user_ids"]
        )
        event["stream_push_user_ids"] = list(info["stream_push_user_ids"])
        event["stream_email_user_ids"] = list(info["stream_email_user_ids"])
        event["muted_sender_user_ids"] = list(info["muted_sender_user_ids"])
        event["prior_mention_user_ids"] = list(prior_mention_user_ids)
        event["presence_idle_user_ids"] = filter_presence_idle_user_ids(info["active_user_ids"])
        event["all_bot_user_ids"] = list(info["all_bot_user_ids"])
        if rendering_result.mentions_wildcard:
            event["wildcard_mention_user_ids"] = list(info["wildcard_mention_user_ids"])
        else:
            event["wildcard_mention_user_ids"] = []
        do_update_mobile_push_notification(
            target_message,
            prior_mention_user_ids,
            rendering_result.mentions_user_ids,
            info["stream_push_user_ids"],
        )
    if topic_name is not None or new_stream is not None:
        orig_topic_name = target_message.topic_name()
        event["propagate_mode"] = propagate_mode
    if new_stream is not None:
        assert content is None
        assert target_message.is_stream_message()
        assert stream_being_edited is not None
        edit_history_event["prev_stream"] = stream_being_edited.id
        event[ORIG_TOPIC] = orig_topic_name
        target_message.recipient_id = new_stream.recipient_id
        event["new_stream_id"] = new_stream.id
        event["propagate_mode"] = propagate_mode
        # When messages are moved from one stream to another, some
        # users may lose access to those messages, including guest
        # users and users not subscribed to the new stream (if it is a
        # private stream). For those users, their experience is as
        # though the messages were deleted, and we should send a
        # delete_message event to them instead.
        subs_to_old_stream = get_active_subscriptions_for_stream_id(
            stream_id, include_deactivated_users=True
        ).select_related("user_profile")
        subs_to_new_stream = list(
            get_active_subscriptions_for_stream_id(
                new_stream.id, include_deactivated_users=True
            ).select_related("user_profile")
        )
        old_stream_sub_ids = [user.user_profile_id for user in subs_to_old_stream]
        new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]
        # Get users who aren't subscribed to the new_stream.
        subs_losing_usermessages = [
            sub for sub in subs_to_old_stream if sub.user_profile_id not in new_stream_sub_ids
        ]
        # Users who can no longer access the message without some action
        # from administrators.
        subs_losing_access = [
            sub
            for sub in subs_losing_usermessages
            if sub.user_profile.is_guest or not new_stream.is_public()
        ]
        # Skip users who will lose their UserMessage rows when building
        # the audience for the update_message event below.
        ums = ums.exclude(
            user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages]
        )
        subs_gaining_usermessages = []
        if not new_stream.is_history_public_to_subscribers():
            # For private streams, with history not public to subscribers,
            # We find out users who are not present in the msgs' old stream
            # and create new UserMessage for these users so that they can
            # access this message.
            subs_gaining_usermessages += [
                user_id for user_id in new_stream_sub_ids if user_id not in old_stream_sub_ids
            ]
    if topic_name is not None:
        topic_name = truncate_topic(topic_name)
        target_message.set_topic_name(topic_name)
        # These fields have legacy field names.
        event[ORIG_TOPIC] = orig_topic_name
        event[TOPIC_NAME] = topic_name
        event[TOPIC_LINKS] = topic_links(target_message.sender.realm_id, topic_name)
        edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
    update_edit_history(target_message, timestamp, edit_history_event)
    delete_event_notify_user_ids: List[int] = []
    if propagate_mode in ["change_later", "change_all"]:
        assert topic_name is not None or new_stream is not None
        assert stream_being_edited is not None
        # Other messages should only get topic/stream fields in their edit history.
        topic_only_edit_history_event = {
            k: v
            for (k, v) in edit_history_event.items()
            if k
            not in [
                "prev_content",
                "prev_rendered_content",
                "prev_rendered_content_version",
            ]
        }
        messages_list = update_messages_for_topic_edit(
            acting_user=user_profile,
            edited_message=target_message,
            propagate_mode=propagate_mode,
            orig_topic_name=orig_topic_name,
            topic_name=topic_name,
            new_stream=new_stream,
            old_stream=stream_being_edited,
            edit_history_event=topic_only_edit_history_event,
            last_edit_time=timestamp,
        )
        changed_messages += messages_list
        if new_stream is not None:
            assert stream_being_edited is not None
            changed_message_ids = [msg.id for msg in changed_messages]
            if subs_gaining_usermessages:
                ums_to_create = []
                for message_id in changed_message_ids:
                    for user_profile_id in subs_gaining_usermessages:
                        # The fact that the user didn't have a UserMessage originally means we can infer that the user
                        # was not mentioned in the original message (even if mention syntax was present, it would not
                        # take effect for a user who was not subscribed). If we were editing the message's content, we
                        # would rerender the message and then use the new stream's data to determine whether this is
                        # a mention of a subscriber; but as we are not doing so, we choose to preserve the "was this
                        # mention syntax an actual mention" decision made during the original rendering for implementation
                        # simplicity. As a result, the only flag to consider applying here is read.
                        um = UserMessageLite(
                            user_profile_id=user_profile_id,
                            message_id=message_id,
                            flags=UserMessage.flags.read,
                        )
                        ums_to_create.append(um)
                bulk_insert_ums(ums_to_create)
            # Delete UserMessage objects for users who will no
            # longer have access to these messages.  Note: This could be
            # very expensive, since it's N guest users x M messages.
            UserMessage.objects.filter(
                user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages],
                message_id__in=changed_message_ids,
            ).delete()
            delete_event: DeleteMessagesEvent = {
                "type": "delete_message",
                "message_ids": changed_message_ids,
                "message_type": "stream",
                "stream_id": stream_being_edited.id,
                "topic": orig_topic_name,
            }
            delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
            send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)
    # This does message.save(update_fields=[...])
    save_message_for_edit_use_case(message=target_message)
    realm_id: Optional[int] = None
    if stream_being_edited is not None:
        realm_id = stream_being_edited.realm_id
    event["message_ids"] = update_to_dict_cache(changed_messages, realm_id)

    def user_info(um: UserMessage) -> Dict[str, Any]:
        # Per-recipient event payload: id plus their message flags.
        return {
            "id": um.user_profile_id,
            "flags": um.flags_list(),
        }

    # The following blocks arranges that users who are subscribed to a
    # stream and can see history from before they subscribed get
    # live-update when old messages are edited (e.g. if the user does
    # a topic edit themself).
    #
    # We still don't send an update event to users who are not
    # subscribed to this stream and don't have a UserMessage row. This
    # means if a non-subscriber is viewing the narrow, they won't get
    # real-time updates. This is a balance between sending
    # message-edit notifications for every public stream to every user
    # in the organization (too expansive, and also not what we do for
    # newly sent messages anyway) and having magical live-updates
    # where possible.
    users_to_be_notified = list(map(user_info, ums))
    if stream_being_edited is not None:
        if stream_being_edited.is_history_public_to_subscribers:
            subscriptions = get_active_subscriptions_for_stream_id(
                stream_id, include_deactivated_users=False
            )
            # We exclude long-term idle users, since they by
            # definition have no active clients.
            subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
            # Remove duplicates by excluding the id of users already
            # in users_to_be_notified list. This is the case where a
            # user both has a UserMessage row and is a current
            # Subscriber
            subscriptions = subscriptions.exclude(
                user_profile_id__in=[um.user_profile_id for um in ums]
            )
            if new_stream is not None:
                assert delete_event_notify_user_ids is not None
                subscriptions = subscriptions.exclude(
                    user_profile_id__in=delete_event_notify_user_ids
                )
            # All users that are subscribed to the stream must be
            # notified when a message is edited
            subscriber_ids = set(subscriptions.values_list("user_profile_id", flat=True))
            if new_stream is not None:
                # TODO: Guest users don't see the new moved topic
                # unless breadcrumb message for new stream is
                # enabled. Excluding these users from receiving this
                # event helps us avoid an error traceback for our
                # clients. We should figure out a way to inform the
                # guest users of this new topic if sending a 'message'
                # event for these messages is not an option.
                #
                # Don't send this event to guest subs who are not
                # subscribers of the old stream but are subscribed to
                # the new stream; clients will be confused.
                old_stream_unsubbed_guests = [
                    sub
                    for sub in subs_to_new_stream
                    if sub.user_profile.is_guest and sub.user_profile_id not in subscriber_ids
                ]
                subscriptions = subscriptions.exclude(
                    user_profile_id__in=[sub.user_profile_id for sub in old_stream_unsubbed_guests]
                )
                subscriber_ids = set(subscriptions.values_list("user_profile_id", flat=True))
            users_to_be_notified += list(map(subscriber_info, sorted(list(subscriber_ids))))
    send_event(user_profile.realm, event, users_to_be_notified)
    if len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None:
        # Notify users that the topic was moved.
        changed_messages_count = len(changed_messages)
        if propagate_mode == "change_all":
            moved_all_visible_messages = True
        else:
            # With other propagate modes, if the user in fact moved
            # all messages in the stream, we want to explain it was a
            # full-topic move.
            #
            # For security model reasons, we don't want to allow a
            # user to take any action that would leak information
            # about older messages they cannot access (e.g. the only
            # remaining messages are in a stream without shared
            # history). The bulk_access_messages call below addresses
            # that concern.
            #
            # bulk_access_messages is inefficient for this task, since
            # we just want to do the exists() version of this
            # query. But it's nice to reuse code, and this bulk
            # operation is likely cheaper than a `GET /messages`
            # unless the topic has thousands of messages of history.
            unmoved_messages = messages_for_topic(
                stream_being_edited.recipient_id,
                orig_topic_name,
            )
            visible_unmoved_messages = bulk_access_messages(
                user_profile, unmoved_messages, stream=stream_being_edited
            )
            moved_all_visible_messages = len(visible_unmoved_messages) == 0
        old_thread_notification_string = None
        if send_notification_to_old_thread:
            if moved_all_visible_messages:
                old_thread_notification_string = gettext_lazy(
                    "This topic was moved to {new_location} by {user}."
                )
            elif changed_messages_count == 1:
                old_thread_notification_string = gettext_lazy(
                    "A message was moved from this topic to {new_location} by {user}."
                )
            else:
                old_thread_notification_string = gettext_lazy(
                    "{changed_messages_count} messages were moved from this topic to {new_location} by {user}."
                )
        new_thread_notification_string = None
        if send_notification_to_new_thread:
            if moved_all_visible_messages:
                new_thread_notification_string = gettext_lazy(
                    "This topic was moved here from {old_location} by {user}."
                )
            elif changed_messages_count == 1:
                new_thread_notification_string = gettext_lazy(
                    "A message was moved here from {old_location} by {user}."
                )
            else:
                new_thread_notification_string = gettext_lazy(
                    "{changed_messages_count} messages were moved here from {old_location} by {user}."
                )
        send_message_moved_breadcrumbs(
            user_profile,
            stream_being_edited,
            orig_topic_name,
            old_thread_notification_string,
            new_stream,
            topic_name,
            new_thread_notification_string,
            changed_messages_count,
        )
    if (
        topic_name is not None
        and new_stream is None
        and content is None
        and len(changed_messages) > 0
    ):
        assert stream_being_edited is not None
        maybe_send_resolve_topic_notifications(
            user_profile=user_profile,
            stream=stream_being_edited,
            old_topic=orig_topic_name,
            new_topic=topic_name,
            changed_messages=changed_messages,
        )
    return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    """Archive the given messages and notify the relevant clients.

    Callers guarantee the messages all belong to the same topic, or
    are a single private message; any other combination is not
    possible with the current callers of this method.
    """
    messages = list(messages)
    message_ids = [message.id for message in messages]
    if not message_ids:
        return
    event: DeleteMessagesEvent = {
        "type": "delete_message",
        "message_ids": message_ids,
    }
    first_message = messages[0]
    if first_message.is_stream_message():
        message_type = "stream"
        stream_id = first_message.recipient.type_id
        event["stream_id"] = stream_id
        event["topic"] = first_message.topic_name()
        subscriptions = get_active_subscriptions_for_stream_id(
            stream_id, include_deactivated_users=False
        )
        # Long-term idle users by definition have no active clients.
        subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
        users_to_notify = list(subscriptions.values_list("user_profile_id", flat=True))
        archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
    else:
        assert len(messages) == 1
        message_type = "private"
        ums = UserMessage.objects.filter(message_id__in=message_ids)
        users_to_notify = [um.user_profile_id for um in ums]
        archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
    move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
    event["message_type"] = message_type
    # Only notify clients once the deletion has been committed.
    transaction.on_commit(lambda: send_event(realm, event, users_to_notify))
def do_delete_messages_by_sender(user: UserProfile) -> None:
    """Archive every message ever sent by `user`, in ascending id order."""
    sender_message_ids = list(
        Message.objects.filter(sender=user).values_list("id", flat=True).order_by("id")
    )
    if not sender_message_ids:
        return
    move_messages_to_archive(sender_message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
    """Return the total message traffic over the last 28 days for each
    of the given streams, as a map from stream id to message count.

    Streams with no recorded traffic in the window are omitted from
    the result; callers treat a missing key as zero (see
    get_average_weekly_stream_traffic).
    """
    stat = COUNT_STATS["messages_in_stream:is_bot:day"]
    traffic_from = timezone_now() - datetime.timedelta(days=28)
    query = StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from)
    query = query.filter(stream_id__in=stream_ids)
    traffic_list = query.values("stream_id").annotate(value=Sum("value"))
    # Build the map in a single comprehension rather than a manual
    # loop-and-assign (perflint PERF403).
    return {row["stream_id"]: row["value"] for row in traffic_list}
def round_to_2_significant_digits(number: int) -> int:
    """Round a non-negative integer to at most 2 significant digits.

    E.g. 1234 -> 1200 and 98765 -> 99000, while values with fewer than
    three digits are returned unchanged.
    """
    # round() with negative ndigits rounds to the left of the decimal
    # point, keeping only the two leading digits.
    ndigits = 2 - len(str(number))
    return int(round(number, ndigits))
# Streams younger than this many days have too little data for a
# meaningful weekly-traffic estimate; see
# get_average_weekly_stream_traffic, which returns None below this age.
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(
    stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]
) -> Optional[int]:
    """Estimate a stream's average weekly message traffic from its
    total traffic over (up to) the last 28 days.

    Args:
        stream_id: id of the stream to estimate.
        stream_date_created: when the stream was created.
        recent_traffic: map from stream id to 28-day message totals,
            as produced by get_streams_traffic; a missing key means no
            recorded traffic.

    Returns None when the stream is younger than
    STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS, since there isn't enough
    data for a meaningful estimate; otherwise the estimate rounded to
    2 significant digits.
    """
    # dict.get is simpler than the previous try/except KeyError.
    stream_traffic = recent_traffic.get(stream_id, 0)
    stream_age = (timezone_now() - stream_date_created).days
    if stream_age >= 28:
        # A full window of data: divide the 28-day total by 4 weeks.
        # (// on ints already yields an int; no int() wrapper needed.)
        average_weekly_traffic = stream_traffic // 4
    elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
        # Younger stream: extrapolate a week from its lifetime daily rate.
        average_weekly_traffic = stream_traffic * 7 // stream_age
    else:
        return None
    # Avoid displaying 0 for a stream that does have some traffic.
    if average_weekly_traffic == 0 and stream_traffic > 0:
        average_weekly_traffic = 1
    return round_to_2_significant_digits(average_weekly_traffic)
def get_web_public_subs(realm: Realm) -> SubscriptionInfo:
    """Build the subscription data shown to web-public (logged-out)
    visitors: every web-public stream in the realm, dressed up with
    the per-subscription settings a brand-new subscriber would get."""
    subscribed = []
    for idx, stream in enumerate(get_web_public_streams_queryset(realm)):
        stream_dict = stream.to_dict()
        # Add versions of the Subscription fields based on a simulated
        # new user subscription set, cycling through the standard
        # stream colors in order.
        stream_dict["is_muted"] = False
        stream_dict["color"] = STREAM_ASSIGNMENT_COLORS[idx % len(STREAM_ASSIGNMENT_COLORS)]
        stream_dict["desktop_notifications"] = True
        stream_dict["audible_notifications"] = True
        stream_dict["push_notifications"] = True
        stream_dict["email_notifications"] = True
        stream_dict["pin_to_top"] = False
        # Computed with no recent-traffic data available.
        stream_dict["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
            stream.id, stream.date_created, {}
        )
        stream_dict["email_address"] = ""
        subscribed.append(stream_dict)
    return SubscriptionInfo(
        subscriptions=subscribed,
        unsubscribed=[],
        never_subscribed=[],
    )
def build_stream_dict_for_sub(
    user: UserProfile,
    sub_dict: RawSubscriptionDict,
    raw_stream_dict: RawStreamDict,
    recent_traffic: Dict[int, int],
) -> Dict[str, object]:
    """Assemble the API payload describing one stream the user is
    subscribed to: the Stream and Subscription models' API_FIELDS,
    some backwards-compatibility aliases, and a few computed values.

    Our caller may add a subscribers field afterwards.
    """
    result: Dict[str, object] = {}
    # Stream.API_FIELDS, with two fields needing translation: `id` is
    # exposed as `stream_id`, and `date_created` becomes a UNIX timestamp.
    for field_name in Stream.API_FIELDS:
        if field_name == "id":
            result["stream_id"] = raw_stream_dict["id"]
        elif field_name == "date_created":
            result["date_created"] = datetime_to_timestamp(raw_stream_dict["date_created"])
        else:
            result[field_name] = raw_stream_dict[field_name]
    # Subscription.API_FIELDS copy over unchanged.
    for field_name in Subscription.API_FIELDS:
        result[field_name] = sub_dict[field_name]
    # Backwards-compatibility for clients that haven't been updated
    # for the in_home_view => is_muted API migration.
    result["in_home_view"] = not result["is_muted"]
    # Backwards-compatibility for clients that haven't been updated
    # for the is_announcement_only -> stream_post_policy migration.
    result["is_announcement_only"] = (
        raw_stream_dict["stream_post_policy"] == Stream.STREAM_POST_POLICY_ADMINS
    )
    # Computed fields not stored directly on the data models.
    result["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
        raw_stream_dict["id"], raw_stream_dict["date_created"], recent_traffic
    )
    result["email_address"] = encode_email_address_helper(
        raw_stream_dict["name"], raw_stream_dict["email_token"], show_sender=True
    )
    return result
def build_stream_dict_for_never_sub(
    raw_stream_dict: RawStreamDict,
    recent_traffic: Dict[int, int],
) -> Dict[str, object]:
    """Assemble the API-facing dict for a stream the user has never been
    subscribed to (no Subscription fields apply)."""
    stream_info: Dict[str, object] = {}

    # Stream.API_FIELDS, with the same "id"/"date_created" conversions
    # applied to subscribed streams.
    for api_field in Stream.API_FIELDS:
        if api_field == "id":
            stream_info["stream_id"] = raw_stream_dict["id"]
        elif api_field == "date_created":
            stream_info["date_created"] = datetime_to_timestamp(raw_stream_dict[api_field])
        else:
            stream_info[api_field] = raw_stream_dict[api_field]

    stream_info["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
        raw_stream_dict["id"], raw_stream_dict["date_created"], recent_traffic
    )

    # Backwards-compatibility addition of removed field.
    stream_info["is_announcement_only"] = (
        raw_stream_dict["stream_post_policy"] == Stream.STREAM_POST_POLICY_ADMINS
    )

    # Our caller may add a subscribers field.
    return stream_info
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(
    user_profile: UserProfile,
    include_subscribers: bool = True,
) -> SubscriptionInfo:
    """Build the user's complete stream data.

    Returns a SubscriptionInfo with three lists of per-stream dicts,
    each sorted by stream name: streams the user is subscribed to,
    streams they were subscribed to, and visible streams they have
    never subscribed to.  If include_subscribers, each dict also gets
    a "subscribers" list of user IDs.
    """
    realm = user_profile.realm
    all_streams: QuerySet[RawStreamDict] = get_active_streams(realm).values(
        *Stream.API_FIELDS,
        # The realm_id and recipient_id are generally not needed in the API.
        "realm_id",
        "recipient_id",
        # email_token isn't public to some users with access to
        # the stream, so doesn't belong in API_FIELDS.
        "email_token",
    )
    recip_id_to_stream_id: Dict[int, int] = {
        stream["recipient_id"]: stream["id"] for stream in all_streams
    }
    all_streams_map: Dict[int, RawStreamDict] = {stream["id"]: stream for stream in all_streams}

    sub_dicts_query: Iterable[RawSubscriptionDict] = (
        get_stream_subscriptions_for_user(user_profile)
        .values(
            *Subscription.API_FIELDS,
            "recipient_id",
            "active",
        )
        .order_by("recipient_id")
    )

    # We only care about subscriptions for active streams.
    sub_dicts: List[RawSubscriptionDict] = [
        sub_dict
        for sub_dict in sub_dicts_query
        if recip_id_to_stream_id.get(sub_dict["recipient_id"])
    ]

    def get_stream_id(sub_dict: RawSubscriptionDict) -> int:
        return recip_id_to_stream_id[sub_dict["recipient_id"]]

    traffic_stream_ids = {get_stream_id(sub_dict) for sub_dict in sub_dicts}
    recent_traffic = get_streams_traffic(stream_ids=traffic_stream_ids)

    # Okay, now we finally get to populating our main results, which
    # will be these three lists.
    subscribed = []
    unsubscribed = []
    never_subscribed = []

    sub_unsub_stream_ids = set()
    for sub_dict in sub_dicts:
        stream_id = get_stream_id(sub_dict)
        sub_unsub_stream_ids.add(stream_id)
        raw_stream_dict = all_streams_map[stream_id]

        stream_dict = build_stream_dict_for_sub(
            user=user_profile,
            sub_dict=sub_dict,
            raw_stream_dict=raw_stream_dict,
            recent_traffic=recent_traffic,
        )

        # is_active is represented in this structure by which list we include it in.
        is_active = sub_dict["active"]
        if is_active:
            subscribed.append(stream_dict)
        else:
            unsubscribed.append(stream_dict)

    if user_profile.can_access_public_streams():
        never_subscribed_stream_ids = set(all_streams_map) - sub_unsub_stream_ids
    else:
        # Users without access to public streams only see web-public ones.
        web_public_stream_ids = {stream["id"] for stream in all_streams if stream["is_web_public"]}
        never_subscribed_stream_ids = web_public_stream_ids - sub_unsub_stream_ids

    never_subscribed_streams = [
        all_streams_map[stream_id] for stream_id in never_subscribed_stream_ids
    ]
    for raw_stream_dict in never_subscribed_streams:
        is_public = not raw_stream_dict["invite_only"]
        if is_public or user_profile.is_realm_admin:
            stream_dict = build_stream_dict_for_never_sub(
                raw_stream_dict=raw_stream_dict, recent_traffic=recent_traffic
            )
            never_subscribed.append(stream_dict)

    if include_subscribers:
        # The highly optimized bulk_get_subscriber_user_ids wants to know which
        # streams we are subscribed to, for validation purposes, and it uses that
        # info to know if it's allowed to find OTHER subscribers.
        subscribed_stream_ids = {
            get_stream_id(sub_dict) for sub_dict in sub_dicts if sub_dict["active"]
        }
        subscriber_map = bulk_get_subscriber_user_ids(
            all_streams,
            user_profile,
            subscribed_stream_ids,
        )

        for lst in [subscribed, unsubscribed, never_subscribed]:
            for stream_dict in lst:
                assert isinstance(stream_dict["stream_id"], int)
                stream_id = stream_dict["stream_id"]
                stream_dict["subscribers"] = subscriber_map[stream_id]

    return SubscriptionInfo(
        subscriptions=sorted(subscribed, key=lambda x: x["name"]),
        unsubscribed=sorted(unsubscribed, key=lambda x: x["name"]),
        never_subscribed=sorted(never_subscribed, key=lambda x: x["name"]),
    )
def gather_subscriptions(
    user_profile: UserProfile,
    include_subscribers: bool = False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Convenience wrapper around gather_subscriptions_helper that
    returns only the (subscribed, unsubscribed) pair."""
    info = gather_subscriptions_helper(
        user_profile,
        include_subscribers=include_subscribers,
    )
    return (info.subscriptions, info.unsubscribed)
class ActivePresenceIdleUserData(TypedDict):
    # Per-user input record for get_active_presence_idle_user_ids.
    # Whether the message triggered an alert word for this user.
    alerted: bool
    # Precomputed notification settings/state for this user and message.
    notifications_data: UserMessageNotificationsData
def get_active_presence_idle_user_ids(
    realm: Realm,
    sender_id: int,
    active_users_data: List[ActivePresenceIdleUserData],
) -> List[int]:
    """
    Given a list of active_user_ids, we build up a subset
    of those users who fit these criteria:

        * They are likely to need notifications.
        * They are no longer "present" according to the
          UserPresence table.
    """
    if realm.presence_disabled:
        return []

    # We only need to know the presence idle state for a user if this message
    # would be notifiable for them if they were indeed idle.  Only including
    # those users in the calculation below is a very important optimization
    # for open communities with many inactive users.
    candidate_ids = {
        data["notifications_data"].user_id
        for data in active_users_data
        if data["notifications_data"].is_notifiable(sender_id, idle=True) or data["alerted"]
    }

    return filter_presence_idle_user_ids(candidate_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
    """Given a set of user IDs (the recipients of a message), access the
    UserPresence table to determine which of these users are currently
    idle and should potentially get email notifications (and push
    notifications with user_profile.enable_online_push_notifications=False).

    We exclude any presence data from ZulipMobile for the purpose of
    triggering these notifications; the mobile app can more effectively
    do its own client-side filtering of notification sounds/etc. for
    the case that the user is actively doing a PM conversation in the app.
    """
    if not user_ids:
        return []

    # Matches presence.js constant
    OFFLINE_THRESHOLD_SECS = 140

    cutoff = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
    present_rows = (
        UserPresence.objects.filter(
            user_profile_id__in=user_ids,
            status=UserPresence.ACTIVE,
            timestamp__gte=cutoff,
        )
        .exclude(client__name="ZulipMobile")
        .distinct("user_profile_id")
        .values("user_profile_id")
    )
    present_user_ids = {row["user_profile_id"] for row in present_rows}
    return sorted(user_ids - present_user_ids)
def do_send_confirmation_email(
    invitee: PreregistrationUser,
    referrer: UserProfile,
    email_language: str,
    invite_expires_in_days: Union[Optional[int], UnspecifiedValue] = UnspecifiedValue(),
) -> str:
    """
    Send the confirmation/welcome e-mail to an invited user.

    Returns the activation URL embedded in the email.
    """
    activation_url = create_confirmation_link(
        invitee, Confirmation.INVITATION, validity_in_days=invite_expires_in_days
    )
    send_email(
        "zerver/emails/invitation",
        to_emails=[invitee.email],
        from_address=FromAddress.tokenized_no_reply_address(),
        language=email_language,
        context={
            "referrer_full_name": referrer.full_name,
            "referrer_email": referrer.delivery_email,
            "activate_url": activation_url,
            "referrer_realm_name": referrer.realm.name,
        },
        realm=referrer.realm,
    )
    return activation_url
def email_not_system_bot(email: str) -> None:
    """Raise ValidationError if `email` is reserved for a cross-realm system bot."""
    if not is_cross_realm_bot_email(email):
        return
    message = email_reserved_for_system_bots_error(email)
    raise ValidationError(
        message,
        code=message,
        params=dict(deactivated=False),
    )
def estimate_recent_invites(realms: Collection[Realm], *, days: int) -> int:
    """An upper bound on the number of invites sent in the last `days` days"""
    cutoff = timezone_now() - datetime.timedelta(days=days)
    total = RealmCount.objects.filter(
        realm__in=realms,
        property="invites_sent::day",
        end_time__gte=cutoff,
    ).aggregate(Sum("value"))["value__sum"]
    # The aggregate is None when no matching rows exist.
    return total if total is not None else 0
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
    """Discourage using invitation emails as a vector for carrying spam.

    Raises InvitationError (with daily_limit_reached=True) if sending
    num_invitees more invites would exceed either the realm's own daily
    cap or the shared cap for recently-created realms.
    """
    msg = _(
        "To protect users, Zulip limits the number of invitations you can send in one day. Because you have reached the limit, no invitations were sent."
    )
    if not settings.OPEN_REALM_CREATION:
        return

    # Per-realm daily cap.
    recent_invites = estimate_recent_invites([realm], days=1)
    if num_invitees + recent_invites > realm.max_invites:
        raise InvitationError(
            msg,
            [],
            sent_invitations=False,
            daily_limit_reached=True,
        )

    default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
    newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
    if realm.date_created <= timezone_now() - newrealm_age:
        # If this isn't a "newly-created" realm, we're done. The
        # remaining code applies an aggregate limit across all
        # "new" realms, to address sudden bursts of spam realms.
        return

    if realm.max_invites > default_max:
        # If a user is on a realm where we've bumped up
        # max_invites, then we exempt them from invite limits.
        return

    # Aggregate cap shared by all recently-created realms whose
    # max_invites has not been manually raised.
    new_realms = Realm.objects.filter(
        date_created__gte=timezone_now() - newrealm_age,
        _max_invites__lte=default_max,
    ).all()
    for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
        recent_invites = estimate_recent_invites(new_realms, days=days)
        if num_invitees + recent_invites > count:
            raise InvitationError(
                msg,
                [],
                sent_invitations=False,
                daily_limit_reached=True,
            )
def do_invite_users(
    user_profile: UserProfile,
    invitee_emails: Collection[str],
    streams: Collection[Stream],
    *,
    invite_expires_in_days: Optional[int],
    invite_as: int = PreregistrationUser.INVITE_AS["MEMBER"],
) -> None:
    """Validate and send email invitations to join user_profile's realm.

    Creates a PreregistrationUser per valid address and queues the
    invitation emails.  Raises InvitationError when rate limits are
    hit, when any address fails validation, or when every address had
    to be skipped (e.g. already-registered users).
    """
    num_invites = len(invitee_emails)

    check_invite_limit(user_profile.realm, num_invites)
    if settings.BILLING_ENABLED:
        from corporate.lib.registration import check_spare_licenses_available_for_inviting_new_users

        check_spare_licenses_available_for_inviting_new_users(user_profile.realm, num_invites)

    realm = user_profile.realm
    if not realm.invite_required:
        # Inhibit joining an open realm to send spam invitations.
        min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
        if user_profile.date_joined > timezone_now() - min_age and not user_profile.is_realm_admin:
            raise InvitationError(
                _(
                    "Your account is too new to send invites for this organization. "
                    "Ask an organization admin, or a more experienced user."
                ),
                [],
                sent_invitations=False,
            )

    good_emails: Set[str] = set()
    errors: List[Tuple[str, str, bool]] = []
    validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
    for email in invitee_emails:
        if email == "":
            continue
        email_error = validate_email_is_valid(
            email,
            validate_email_allowed_in_realm,
        )

        if email_error:
            errors.append((email, email_error, False))
        else:
            good_emails.add(email)

    """
    good_emails are emails that look ok so far,
    but we still need to make sure they're not
    gonna conflict with existing users
    """
    error_dict = get_existing_user_errors(user_profile.realm, good_emails)

    skipped: List[Tuple[str, str, bool]] = []
    for email in error_dict:
        msg, deactivated = error_dict[email]
        skipped.append((email, msg, deactivated))
        good_emails.remove(email)

    validated_emails = list(good_emails)

    if errors:
        raise InvitationError(
            _("Some emails did not validate, so we didn't send any invitations."),
            errors + skipped,
            sent_invitations=False,
        )

    if skipped and len(skipped) == len(invitee_emails):
        # All e-mails were skipped, so we didn't actually invite anyone.
        raise InvitationError(
            _("We weren't able to invite anyone."), skipped, sent_invitations=False
        )

    # We do this here rather than in the invite queue processor since this
    # is used for rate limiting invitations, rather than keeping track of
    # when exactly invitations were sent
    do_increment_logging_stat(
        user_profile.realm,
        COUNT_STATS["invites_sent::day"],
        None,
        timezone_now(),
        increment=len(validated_emails),
    )

    # Now that we are past all the possible errors, we actually create
    # the PreregistrationUser objects and trigger the email invitations.
    for email in validated_emails:
        # The logged in user is the referrer.
        prereg_user = PreregistrationUser(
            email=email, referred_by=user_profile, invited_as=invite_as, realm=user_profile.realm
        )
        prereg_user.save()
        stream_ids = [stream.id for stream in streams]
        prereg_user.streams.set(stream_ids)

        event = {
            "prereg_id": prereg_user.id,
            "referrer_id": user_profile.id,
            "email_language": user_profile.realm.default_language,
            "invite_expires_in_days": invite_expires_in_days,
        }
        queue_json_publish("invites", event)

    if skipped:
        # Partial success: the remaining (skipped) addresses are reported.
        raise InvitationError(
            _(
                "Some of those addresses are already using Zulip, "
                "so we didn't send them an invitation. We did send "
                "invitations to everyone else!"
            ),
            skipped,
            sent_invitations=True,
        )
    notify_invites_changed(user_profile.realm)
def get_invitation_expiry_date(confirmation_obj: Confirmation) -> Optional[int]:
    """Return the confirmation's expiry as a UNIX timestamp, or None if it never expires."""
    expiry_date = confirmation_obj.expiry_date
    return None if expiry_date is None else datetime_to_timestamp(expiry_date)
def do_get_invites_controlled_by_user(user_profile: UserProfile) -> List[Dict[str, Any]]:
    """
    Returns a list of dicts representing invitations that can be controlled by user_profile.
    This isn't necessarily the same as all the invitations generated by the user, as administrators
    can control also invitations that they did not themselves create.
    """
    if user_profile.is_realm_admin:
        # Admins control every invitation in their realm.
        prereg_users = filter_to_valid_prereg_users(
            PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
        )
    else:
        prereg_users = filter_to_valid_prereg_users(
            PreregistrationUser.objects.filter(referred_by=user_profile)
        )

    invites = []

    for invitee in prereg_users:
        invites.append(
            dict(
                email=invitee.email,
                invited_by_user_id=invitee.referred_by.id,
                invited=datetime_to_timestamp(invitee.invited_at),
                expiry_date=get_invitation_expiry_date(invitee.confirmation.get()),
                id=invitee.id,
                invited_as=invitee.invited_as,
                is_multiuse=False,
            )
        )

    if not user_profile.is_realm_admin:
        # We do not return multiuse invites to non-admin users.
        return invites

    # Multiuse invites that are unexpired (or never expire).
    multiuse_confirmation_objs = Confirmation.objects.filter(
        realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE
    ).filter(Q(expiry_date__gte=timezone_now()) | Q(expiry_date=None))
    for confirmation_obj in multiuse_confirmation_objs:
        invite = confirmation_obj.content_object
        assert invite is not None
        invites.append(
            dict(
                invited_by_user_id=invite.referred_by.id,
                invited=datetime_to_timestamp(confirmation_obj.date_sent),
                expiry_date=get_invitation_expiry_date(confirmation_obj),
                id=invite.id,
                link_url=confirmation_url(
                    confirmation_obj.confirmation_key,
                    user_profile.realm,
                    Confirmation.MULTIUSE_INVITE,
                ),
                invited_as=invite.invited_as,
                is_multiuse=True,
            )
        )
    return invites
def get_valid_invite_confirmations_generated_by_user(
    user_profile: UserProfile,
) -> List[Confirmation]:
    """Return all still-valid invitation Confirmation objects (single-use
    and multiuse) created by this user."""
    prereg_user_ids = filter_to_valid_prereg_users(
        PreregistrationUser.objects.filter(referred_by=user_profile)
    ).values_list("id", flat=True)
    confirmations: List[Confirmation] = list(
        Confirmation.objects.filter(
            type=Confirmation.INVITATION, object_id__in=prereg_user_ids
        )
    )

    multiuse_invite_ids = MultiuseInvite.objects.filter(
        referred_by=user_profile
    ).values_list("id", flat=True)
    # Multiuse invites are valid while unexpired (or with no expiry set).
    unexpired = Q(expiry_date__gte=timezone_now()) | Q(expiry_date=None)
    confirmations.extend(
        Confirmation.objects.filter(
            type=Confirmation.MULTIUSE_INVITE,
            object_id__in=multiuse_invite_ids,
        ).filter(unexpired)
    )

    return confirmations
def revoke_invites_generated_by_user(user_profile: UserProfile) -> None:
    """Expire every outstanding invitation created by this user, effective now."""
    to_revoke = get_valid_invite_confirmations_generated_by_user(user_profile)
    revocation_time = timezone_now()
    for confirmation in to_revoke:
        confirmation.expiry_date = revocation_time

    Confirmation.objects.bulk_update(to_revoke, ["expiry_date"])
    if to_revoke:
        notify_invites_changed(realm=user_profile.realm)
def do_create_multiuse_invite_link(
    referred_by: UserProfile,
    invited_as: int,
    invite_expires_in_days: Optional[int],
    streams: Sequence[Stream] = [],
) -> str:
    """Create a reusable invitation link for the referrer's realm and
    return its confirmation URL."""
    invite = MultiuseInvite.objects.create(realm=referred_by.realm, referred_by=referred_by)
    if streams:
        invite.streams.set(streams)
    invite.invited_as = invited_as
    invite.save()
    notify_invites_changed(referred_by.realm)
    return create_confirmation_link(
        invite, Confirmation.MULTIUSE_INVITE, validity_in_days=invite_expires_in_days
    )
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
    """Revoke a pending single-use invitation and notify clients."""
    email = prereg_user.email
    realm = prereg_user.realm
    assert realm is not None

    # Delete both the confirmation objects and the prereg_user object.
    # TODO: Probably we actually want to set the confirmation objects
    # to a "revoked" status so that we can give the invited user a better
    # error message.
    prereg_ct = ContentType.objects.get_for_model(PreregistrationUser)
    Confirmation.objects.filter(content_type=prereg_ct, object_id=prereg_user.id).delete()
    prereg_user.delete()
    clear_scheduled_invitation_emails(email)
    notify_invites_changed(realm)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
    """Revoke a multiuse invite link by deleting it and its confirmation."""
    realm = multiuse_invite.referred_by.realm

    invite_ct = ContentType.objects.get_for_model(MultiuseInvite)
    Confirmation.objects.filter(content_type=invite_ct, object_id=multiuse_invite.id).delete()
    multiuse_invite.delete()
    notify_invites_changed(realm)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    """Re-send the invitation email for a pending invite.

    Returns the new invited_at as a UNIX timestamp.
    """
    # Both are guaranteed non-None by the caller's code path.
    assert prereg_user.referred_by is not None
    assert prereg_user.realm is not None

    check_invite_limit(prereg_user.referred_by.realm, 1)

    prereg_user.invited_at = timezone_now()
    prereg_user.save()

    expiry_date = prereg_user.confirmation.get().expiry_date
    if expiry_date is None:
        invite_expires_in_days = None
    else:
        # The resent invitation is reset to expire as long after the
        # reminder is sent as it lasted originally.
        invite_expires_in_days = (expiry_date - prereg_user.invited_at).days
    prereg_user.confirmation.clear()

    do_increment_logging_stat(
        prereg_user.realm, COUNT_STATS["invites_sent::day"], None, prereg_user.invited_at
    )

    clear_scheduled_invitation_emails(prereg_user.email)

    # NOTE(review): the original "We don't store the custom email body, so
    # just set it to None" comment looks stale -- the event carries no such
    # field; verify against git history before restoring it.
    event = {
        "prereg_id": prereg_user.id,
        "referrer_id": prereg_user.referred_by.id,
        "email_language": prereg_user.referred_by.realm.default_language,
        "invite_expires_in_days": invite_expires_in_days,
    }
    queue_json_publish("invites", event)

    return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
    """Broadcast the realm's current custom emoji set to all active users."""
    send_event(
        realm,
        dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji()),
        active_user_ids(realm.id),
    )
def check_add_realm_emoji(
    realm: Realm, name: str, author: UserProfile, image_file: IO[bytes]
) -> RealmEmoji:
    """Validate, upload, and register a custom realm emoji.

    Raises JsonableError if an emoji with this name already exists.
    If the image upload fails, the partially-created database row is
    rolled back (deleted) before the exception propagates.
    """
    try:
        realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
        realm_emoji.full_clean()
        realm_emoji.save()
    except django.db.utils.IntegrityError:
        # Match the string in upload_emoji.
        raise JsonableError(_("A custom emoji with this name already exists."))

    emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)

    # The only user-controlled portion of 'emoji_file_name' is an extension,
    # which can not contain '..' or '/' or '\', making it difficult to exploit
    emoji_file_name = mark_sanitized(emoji_file_name)

    emoji_uploaded_successfully = False
    is_animated = False
    try:
        is_animated = upload_emoji_image(image_file, emoji_file_name, author)
        emoji_uploaded_successfully = True
    finally:
        if not emoji_uploaded_successfully:
            # Upload raised: delete the row so no broken emoji remains.
            realm_emoji.delete()
    realm_emoji.file_name = emoji_file_name
    realm_emoji.is_animated = is_animated
    realm_emoji.save(update_fields=["file_name", "is_animated"])
    notify_realm_emoji(realm_emoji.realm)
    return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
    """Soft-delete (deactivate) a custom emoji by name and notify clients."""
    emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
    emoji.deactivated = True
    emoji.save(update_fields=["deactivated"])
    notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Sequence[str]) -> None:
    """Send the user's full alert-word list to their clients."""
    send_event(
        user_profile.realm,
        dict(type="alert_words", alert_words=words),
        [user_profile.id],
    )
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    """Add alert words for the user and push the updated list to their clients."""
    updated_words = add_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, updated_words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    """Remove alert words for the user and push the updated list to their clients."""
    remaining_words = remove_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, remaining_words)
def do_mute_topic(
    user_profile: UserProfile,
    stream: Stream,
    topic: str,
    date_muted: Optional[datetime.datetime] = None,
) -> None:
    """Mute a stream topic for the user and push the updated mute list."""
    when = timezone_now() if date_muted is None else date_muted
    add_topic_mute(user_profile, stream.id, stream.recipient_id, topic, when)
    send_event(
        user_profile.realm,
        dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)),
        [user_profile.id],
    )
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
    """Unmute a stream topic; raises JsonableError if it wasn't muted."""
    try:
        remove_topic_mute(user_profile, stream.id, topic)
    except UserTopic.DoesNotExist:
        raise JsonableError(_("Topic is not muted"))
    send_event(
        user_profile.realm,
        dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)),
        [user_profile.id],
    )
def do_mute_user(
    user_profile: UserProfile,
    muted_user: UserProfile,
    date_muted: Optional[datetime.datetime] = None,
) -> None:
    """Mute another user for user_profile: record the mute, mark that
    user's messages read, notify clients, and write an audit-log entry."""
    when = date_muted if date_muted is not None else timezone_now()
    add_user_mute(user_profile, muted_user, when)
    do_mark_muted_user_messages_as_read(user_profile, muted_user)
    send_event(
        user_profile.realm,
        dict(type="muted_users", muted_users=get_user_mutes(user_profile)),
        [user_profile.id],
    )

    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_MUTED,
        event_time=when,
        extra_data=orjson.dumps({"muted_user_id": muted_user.id}).decode(),
    )
def do_unmute_user(mute_object: MutedUser) -> None:
    """Delete a user-mute row, notify clients, and write an audit-log entry."""
    user_profile = mute_object.user_profile
    muted_user = mute_object.muted_user
    mute_object.delete()
    send_event(
        user_profile.realm,
        dict(type="muted_users", muted_users=get_user_mutes(user_profile)),
        [user_profile.id],
    )

    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_UNMUTED,
        event_time=timezone_now(),
        extra_data=orjson.dumps({"unmuted_user_id": muted_user.id}).decode(),
    )
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
    """Record that the user has seen a hotspot and resend the remaining ones."""
    UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
    send_event(
        user.realm,
        dict(type="hotspots", hotspots=get_next_hotspots(user)),
        [user.id],
    )
def notify_linkifiers(realm: Realm) -> None:
    """Push the realm's linkifiers to all active users, in both the
    current and the deprecated event formats."""
    recipients = active_user_ids(realm.id)
    send_event(
        realm,
        dict(type="realm_linkifiers", realm_linkifiers=linkifiers_for_realm(realm.id)),
        recipients,
    )

    # Backwards compatibility: the now-deprecated "realm_filters"
    # event-type is used by older clients, and uses tuples.
    send_event(
        realm,
        dict(type="realm_filters", realm_filters=realm_filters_for_realm(realm.id)),
        recipients,
    )
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
#   * Named groups will be converted to numbered groups automatically
#   * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_linkifier(realm: Realm, pattern: str, url_format_string: str) -> int:
    """Create a new linkifier for the realm, notify clients, and return its id."""
    linkifier = RealmFilter(
        realm=realm,
        pattern=pattern.strip(),
        url_format_string=url_format_string.strip(),
    )
    linkifier.full_clean()
    linkifier.save()
    notify_linkifiers(realm)

    return linkifier.id
def do_remove_linkifier(
    realm: Realm, pattern: Optional[str] = None, id: Optional[int] = None
) -> None:
    """Delete a linkifier, looked up by pattern when given, else by id,
    then notify clients."""
    if pattern is None:
        RealmFilter.objects.get(realm=realm, id=id).delete()
    else:
        RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
    notify_linkifiers(realm)
def do_update_linkifier(realm: Realm, id: int, pattern: str, url_format_string: str) -> None:
    """Edit an existing linkifier's pattern and URL format, then notify clients."""
    linkifier = RealmFilter.objects.get(realm=realm, id=id)
    linkifier.pattern = pattern.strip()
    linkifier.url_format_string = url_format_string.strip()
    linkifier.full_clean()
    linkifier.save(update_fields=["pattern", "url_format_string"])
    notify_linkifiers(realm)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
    """Create an allowed email domain for the realm and notify clients.

    Returns the created RealmDomain.
    """
    # Note: the original signature had a spurious parenthesized return
    # annotation `-> (RealmDomain)`; normalized to match the rest of
    # this file's signatures (no behavior change).
    realm_domain = RealmDomain.objects.create(
        realm=realm, domain=domain, allow_subdomains=allow_subdomains
    )
    event = dict(
        type="realm_domains",
        op="add",
        realm_domain=dict(
            domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains
        ),
    )
    send_event(realm, event, active_user_ids(realm.id))
    return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
    """Update a realm domain's allow_subdomains flag and notify clients."""
    realm_domain.allow_subdomains = allow_subdomains
    realm_domain.save(update_fields=["allow_subdomains"])
    send_event(
        realm_domain.realm,
        dict(
            type="realm_domains",
            op="change",
            realm_domain=dict(
                domain=realm_domain.domain,
                allow_subdomains=realm_domain.allow_subdomains,
            ),
        ),
        active_user_ids(realm_domain.realm_id),
    )
def do_remove_realm_domain(
    realm_domain: RealmDomain, *, acting_user: Optional[UserProfile]
) -> None:
    """Delete a realm domain and notify clients."""
    realm = realm_domain.realm
    domain = realm_domain.domain
    realm_domain.delete()
    no_domains_left = not RealmDomain.objects.filter(realm=realm).exists()
    if no_domains_left and realm.emails_restricted_to_domains:
        # If this was the last realm domain, we mark the realm as no
        # longer restricted to domain, because the feature doesn't do
        # anything if there are no domains, and this is probably less
        # confusing than the alternative.
        do_set_realm_property(realm, "emails_restricted_to_domains", False, acting_user=acting_user)
    send_event(
        realm,
        dict(type="realm_domains", op="remove", domain=domain),
        active_user_ids(realm.id),
    )
def notify_realm_playgrounds(realm: Realm) -> None:
    """Broadcast the realm's code playgrounds to all active users."""
    send_event(
        realm,
        dict(type="realm_playgrounds", realm_playgrounds=get_realm_playgrounds(realm)),
        active_user_ids(realm.id),
    )
def do_add_realm_playground(realm: Realm, **kwargs: Any) -> int:
    """Create a code playground for the realm, notify clients, and return its id.

    We expect full_clean to always pass since thorough input validation
    is performed in the view (using check_url, check_pygments_language,
    etc.) before calling this function.
    """
    playground = RealmPlayground(realm=realm, **kwargs)
    playground.full_clean()
    playground.save()
    notify_realm_playgrounds(realm)
    return playground.id
def do_remove_realm_playground(realm: Realm, realm_playground: RealmPlayground) -> None:
    """Delete a code playground and notify clients."""
    realm_playground.delete()
    notify_realm_playgrounds(realm)
def get_occupied_streams(realm: Realm) -> QuerySet:
    """Get streams with subscribers"""
    # TODO: Make a generic stub for QuerySet
    has_active_subscriber = Exists(
        Subscription.objects.filter(
            active=True,
            is_user_active=True,
            user_profile__realm=realm,
            recipient_id=OuterRef("recipient_id"),
        ),
    )

    return (
        Stream.objects.filter(realm=realm, deactivated=False)
        .annotate(occupied=has_active_subscriber)
        .filter(occupied=True)
    )
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:  # nocoverage
    """Return client-format data for all web-public streams in the realm."""
    return Stream.get_client_data(get_web_public_streams_queryset(realm))
def do_get_streams(
    user_profile: UserProfile,
    include_public: bool = True,
    include_web_public: bool = False,
    include_subscribed: bool = True,
    include_all_active: bool = False,
    include_default: bool = False,
    include_owner_subscribed: bool = False,
) -> List[Dict[str, Any]]:
    """Return client-format dicts for the streams matching the requested
    sources, sorted by name.  Raises JsonableError if include_all_active
    is requested by a non-admin.
    """
    # This function is only used by API clients now.

    if include_all_active and not user_profile.is_realm_admin:
        raise JsonableError(_("User not authorized for this query"))

    include_public = include_public and user_profile.can_access_public_streams()

    # Start out with all active streams in the realm.
    query = Stream.objects.filter(realm=user_profile.realm, deactivated=False)

    if include_all_active:
        streams = Stream.get_client_data(query)
    else:
        # We construct a query as the or (|) of the various sources
        # this user requested streams from.
        query_filter: Optional[Q] = None

        def add_filter_option(option: Q) -> None:
            nonlocal query_filter
            if query_filter is None:
                query_filter = option
            else:
                query_filter |= option

        if include_subscribed:
            subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
            recipient_check = Q(id__in=set(subscribed_stream_ids))
            add_filter_option(recipient_check)
        if include_public:
            invite_only_check = Q(invite_only=False)
            add_filter_option(invite_only_check)
        if include_web_public:
            # This should match get_web_public_streams_queryset
            web_public_check = Q(
                is_web_public=True,
                invite_only=False,
                history_public_to_subscribers=True,
                deactivated=False,
            )
            add_filter_option(web_public_check)
        if include_owner_subscribed and user_profile.is_bot:
            bot_owner = user_profile.bot_owner
            assert bot_owner is not None
            owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
            owner_subscribed_check = Q(id__in=set(owner_stream_ids))
            add_filter_option(owner_subscribed_check)

        if query_filter is not None:
            query = query.filter(query_filter)
            streams = Stream.get_client_data(query)
        else:
            # Don't bother going to the database with no valid sources
            streams = []

    streams.sort(key=lambda elt: elt["name"])

    if include_default:
        # Annotate each stream with whether it is a realm default stream.
        is_default = {}
        default_streams = get_default_streams_for_realm(user_profile.realm_id)
        for default_stream in default_streams:
            is_default[default_stream.id] = True
        for stream in streams:
            stream["is_default"] = is_default.get(stream["stream_id"], False)

    return streams
def notify_attachment_update(
    user_profile: UserProfile, op: str, attachment_dict: Dict[str, Any]
) -> None:
    """Send an attachment event (including current upload-quota usage)
    to the attachment's owner."""
    send_event(
        user_profile.realm,
        {
            "type": "attachment",
            "op": op,
            "attachment": attachment_dict,
            "upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
        },
        [user_profile.id],
    )
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
    """Associate the uploaded files referenced in `message` with it.

    Returns True if at least one attachment was claimed.  References
    the sender lacks permission to share are skipped with a warning.
    """
    claimed = False
    for path_id in potential_path_ids:
        user_profile = message.sender
        is_message_realm_public = False
        is_message_web_public = False
        if message.is_stream_message():
            stream = Stream.objects.get(id=message.recipient.type_id)
            is_message_realm_public = stream.is_public()
            is_message_web_public = stream.is_web_public

        if not validate_attachment_request(user_profile, path_id):
            # Technically, there are 2 cases here:
            # * The user put something in their message that has the form
            #   of an upload, but doesn't correspond to a file that actually
            #   exists.  validate_attachment_request will return None.
            # * The user is trying to send a link to a file they don't have permission to
            #   access themselves.  validate_attachment_request will return False.
            #
            # Either case is unusual and suggests a UI bug that got
            # the user in this situation, so we log in these cases.
            logging.warning(
                "User %s tried to share upload %s in message %s, but lacks permission",
                user_profile.id,
                path_id,
                message.id,
            )
            continue

        claimed = True
        attachment = claim_attachment(
            user_profile, path_id, message, is_message_realm_public, is_message_web_public
        )
        notify_attachment_update(user_profile, "update", attachment.to_dict())
    return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
    """Delete stored files and rows for attachments unclaimed for `weeks_ago` weeks."""
    for attachment in get_old_unclaimed_attachments(weeks_ago):
        delete_message_image(attachment.path_id)
        attachment.delete()
def check_attachment_reference_change(
    message: Message, rendering_result: MessageRenderingResult
) -> bool:
    """Sync Attachment rows with the uploads referenced by an edited message.

    Returns whether the message (still) references any attachments.
    """
    # For a unsaved message edit (message.* has been updated, but not
    # saved to the database), adjusts Attachment data to correspond to
    # the new content.
    prev_attachments = {a.path_id for a in message.attachment_set.all()}
    new_attachments = set(rendering_result.potential_attachment_path_ids)
    if new_attachments == prev_attachments:
        return bool(prev_attachments)
    to_remove = list(prev_attachments - new_attachments)
    if len(to_remove) > 0:
        # Lock the rows while detaching, to avoid racing with concurrent
        # claim/delete operations on the same attachments.
        attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
        message.attachment_set.remove(*attachments_to_update)
    to_add = list(new_attachments - prev_attachments)
    if len(to_add) > 0:
        do_claim_attachments(message, to_add)
    return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm) -> None:
    """Broadcast the realm's full custom-profile-field list to active users."""
    field_dicts = [field.as_dict() for field in custom_profile_fields_for_realm(realm.id)]
    event = dict(type="custom_profile_fields", fields=field_dicts)
    send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(
    realm: Realm, field_subtype: str
) -> CustomProfileField:
    """Create one of the predefined EXTERNAL_ACCOUNT profile fields.

    `field_subtype` must be a key of DEFAULT_EXTERNAL_ACCOUNTS; the
    field's name and hint are taken from that table.
    """
    field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
    custom_profile_field = CustomProfileField(
        realm=realm,
        name=field_data["name"],
        field_type=CustomProfileField.EXTERNAL_ACCOUNT,
        hint=field_data["hint"],
        field_data=orjson.dumps(dict(subtype=field_subtype)).decode(),
    )
    custom_profile_field.save()
    # Save once to obtain an id, then reuse the id as the default sort order.
    custom_profile_field.order = custom_profile_field.id
    custom_profile_field.save(update_fields=["order"])
    notify_realm_custom_profile_fields(realm)
    return custom_profile_field
def try_add_realm_custom_profile_field(
    realm: Realm,
    name: str,
    field_type: int,
    hint: str = "",
    field_data: Optional[ProfileFieldData] = None,
) -> CustomProfileField:
    """Create a custom profile field for `realm`, place it last in the
    ordering, and notify clients.  `field_data` is only persisted for
    SELECT and EXTERNAL_ACCOUNT field types."""
    custom_profile_field = CustomProfileField(
        realm=realm, name=name, field_type=field_type, hint=hint
    )
    if custom_profile_field.field_type in (
        CustomProfileField.SELECT,
        CustomProfileField.EXTERNAL_ACCOUNT,
    ):
        custom_profile_field.field_data = orjson.dumps(field_data or {}).decode()
    custom_profile_field.save()
    # Save once to obtain an id, then reuse the id as the default sort order.
    custom_profile_field.order = custom_profile_field.id
    custom_profile_field.save(update_fields=["order"])
    notify_realm_custom_profile_fields(realm)
    return custom_profile_field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
    """
    Deleting a field will also delete the user profile data
    associated with it in CustomProfileFieldValue model (presumably via
    the foreign key's cascade — confirm in the model definitions).
    """
    field.delete()
    notify_realm_custom_profile_fields(realm)
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
    # Bulk-delete every custom profile field in the realm.  Note: unlike
    # do_remove_realm_custom_profile_field, no client event is sent here.
    CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(
    realm: Realm,
    field: CustomProfileField,
    name: str,
    hint: str = "",
    field_data: Optional[ProfileFieldData] = None,
) -> None:
    """Update a field's name and hint (and field_data for SELECT /
    EXTERNAL_ACCOUNT types), then notify clients."""
    field.name = name
    field.hint = hint
    if field.field_type in (
        CustomProfileField.SELECT,
        CustomProfileField.EXTERNAL_ACCOUNT,
    ):
        field.field_data = orjson.dumps(field_data or {}).decode()
    field.save()
    notify_realm_custom_profile_fields(realm)
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
    """Reorder the realm's custom profile fields to match `order` (a list
    of field ids); raises JsonableError if any field id is missing."""
    order_mapping = {field_id: position for position, field_id in enumerate(order)}
    custom_profile_fields = CustomProfileField.objects.filter(realm=realm)
    # Validate first so we never persist a partial reordering.
    for field in custom_profile_fields:
        if field.id not in order_mapping:
            raise JsonableError(_("Invalid order mapping."))
    for field in custom_profile_fields:
        field.order = order_mapping[field.id]
        field.save(update_fields=["order"])
    notify_realm_custom_profile_fields(realm)
def notify_user_update_custom_profile_data(
    user_profile: UserProfile, field: Dict[str, Union[int, str, List[int], None]]
) -> None:
    """Broadcast one user's changed custom-profile-field value to the realm.

    `rendered_value` is only included when present/truthy.
    """
    data = dict(id=field["id"], value=field["value"])
    if field["rendered_value"]:
        data["rendered_value"] = field["rendered_value"]
    event = dict(
        type="realm_user",
        op="update",
        person=dict(user_id=user_profile.id, custom_profile_field=data),
    )
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(
    user_profile: UserProfile,
    data: List[Dict[str, Union[int, ProfileDataElementValue]]],
) -> None:
    """Write custom-profile-field values for `user_profile`.

    Each entry in `data` carries a field `id` and the new `value`.
    Unchanged values are skipped (no save, no event).  All writes share
    one transaction; one realm_user update event is sent per field that
    actually changed.
    """
    with transaction.atomic():
        for custom_profile_field in data:
            field_value, created = CustomProfileFieldValue.objects.get_or_create(
                user_profile=user_profile, field_id=custom_profile_field["id"]
            )
            # field_value.value is a TextField() so we need to have field["value"]
            # in string form to correctly make comparisons and assignments.
            if isinstance(custom_profile_field["value"], str):
                custom_profile_field_value_string = custom_profile_field["value"]
            else:
                custom_profile_field_value_string = orjson.dumps(
                    custom_profile_field["value"]
                ).decode()
            if not created and field_value.value == custom_profile_field_value_string:
                # If the field value isn't actually being changed to a different one,
                # we have nothing to do here for this field.
                continue
            field_value.value = custom_profile_field_value_string
            if field_value.field.is_renderable():
                field_value.rendered_value = render_stream_description(
                    custom_profile_field_value_string
                )
                field_value.save(update_fields=["value", "rendered_value"])
            else:
                field_value.save(update_fields=["value"])
            # NOTE(review): the event is emitted while still inside the
            # transaction — confirm send_event defers until commit.
            notify_user_update_custom_profile_data(
                user_profile,
                {
                    "id": field_value.field_id,
                    "value": field_value.value,
                    "rendered_value": field_value.rendered_value,
                    "type": field_value.field.field_type,
                },
            )
def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None:
    """Clear one custom-profile-field value for a user and notify clients.

    An unknown field id is an error; a field the user simply has no value
    for is silently ignored (second except clause below).
    """
    try:
        custom_profile_field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
        field_value = CustomProfileFieldValue.objects.get(
            field=custom_profile_field, user_profile=user_profile
        )
        field_value.delete()
        notify_user_update_custom_profile_data(
            user_profile,
            {
                "id": field_id,
                "value": None,
                "rendered_value": None,
                "type": custom_profile_field.field_type,
            },
        )
    except CustomProfileField.DoesNotExist:
        raise JsonableError(_("Field id {id} not found.").format(id=field_id))
    except CustomProfileFieldValue.DoesNotExist:
        pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
    """Announce a newly created user group to the realm's active users."""
    member_ids = [member.id for member in members]
    group_payload = dict(
        name=user_group.name,
        members=member_ids,
        description=user_group.description,
        id=user_group.id,
        is_system_group=user_group.is_system_group,
    )
    event = dict(type="user_group", op="add", group=group_payload)
    send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(
    realm: Realm, name: str, initial_members: List[UserProfile], description: str
) -> None:
    """Create a user group and announce it; a duplicate name surfaces as
    a JsonableError rather than a database IntegrityError."""
    try:
        new_group = create_user_group(name, initial_members, realm, description=description)
        do_send_create_user_group_event(new_group, initial_members)
    except django.db.utils.IntegrityError:
        raise JsonableError(_("User group '{}' already exists.").format(name))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, str]) -> None:
    # Broadcast a user_group/update event carrying the changed fields in `data`.
    event = dict(type="user_group", op="update", group_id=user_group.id, data=data)
    send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
    """Rename a user group; raises JsonableError if the name is taken."""
    try:
        user_group.name = name
        user_group.save(update_fields=["name"])
    except django.db.utils.IntegrityError:
        raise JsonableError(_("User group '{}' already exists.").format(name))
    # Only notify clients once the rename has been persisted successfully.
    do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
    """Persist a new description for a user group and notify clients."""
    user_group.description = description
    user_group.save(update_fields=["description"])
    changed_data = dict(description=description)
    do_send_user_group_update_event(user_group, changed_data)
def do_update_outgoing_webhook_service(
    bot_profile: UserProfile, service_interface: int, service_payload_url: str
) -> None:
    """Update an outgoing-webhook bot's service URL/interface and notify
    the bot's owners."""
    # TODO: First service is chosen because currently one bot can only have one service.
    # Update this once multiple services are supported.
    service = get_bot_services(bot_profile.id)[0]
    service.base_url = service_payload_url
    service.interface = service_interface
    service.save()
    send_event(
        bot_profile.realm,
        dict(
            type="realm_bot",
            op="update",
            bot=dict(
                user_id=bot_profile.id,
                services=[
                    dict(
                        base_url=service.base_url, interface=service.interface, token=service.token
                    )
                ],
            ),
        ),
        bot_owner_user_ids(bot_profile),
    )
def do_update_bot_config_data(bot_profile: UserProfile, config_data: Dict[str, str]) -> None:
    """Merge `config_data` into an embedded bot's stored configuration and
    send the resulting full config to the bot's owners."""
    for config_key, config_value in config_data.items():
        set_bot_config(bot_profile, config_key, config_value)
    updated_config_data = get_bot_config(bot_profile)
    bot_payload = dict(
        user_id=bot_profile.id,
        services=[dict(config_data=updated_config_data)],
    )
    event = dict(type="realm_bot", op="update", bot=bot_payload)
    send_event(bot_profile.realm, event, bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
    """Return the service dicts for one bot user.

    Outgoing-webhook bots yield one dict per Service row; embedded bots
    yield a single config dict (or nothing if no config exists); other
    bot types yield an empty list.
    """
    user_profile = get_user_profile_by_id(user_profile_id)
    services = get_bot_services(user_profile_id)
    service_dicts: List[Dict[str, Any]] = []
    if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
        service_dicts = [
            {
                "base_url": service.base_url,
                "interface": service.interface,
                "token": service.token,
            }
            for service in services
        ]
    elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
        try:
            service_dicts = [
                {
                    "config_data": get_bot_config(user_profile),
                    "service_name": services[0].name,
                }
            ]
        # A ConfigError just means that there are no config entries for user_profile.
        except ConfigError:
            pass
    return service_dicts
def get_service_dicts_for_bots(
    bot_dicts: List[Dict[str, Any]], realm: Realm
) -> Dict[int, List[Dict[str, Any]]]:
    """Bulk variant of get_service_dicts_for_bot: map bot user id -> service dicts.

    Fetches all Service rows in one query and all embedded-bot configs in
    one batch to avoid per-bot database round trips.  `realm` is unused
    here but kept for interface compatibility with existing callers.
    """
    bot_profile_ids = [bot_dict["id"] for bot_dict in bot_dicts]
    bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
    for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
        bot_services_by_uid[service.user_profile_id].append(service)
    embedded_bot_ids = [
        bot_dict["id"] for bot_dict in bot_dicts if bot_dict["bot_type"] == UserProfile.EMBEDDED_BOT
    ]
    embedded_bot_configs = get_bot_configs(embedded_bot_ids)
    service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
    for bot_dict in bot_dicts:
        bot_profile_id = bot_dict["id"]
        bot_type = bot_dict["bot_type"]
        services = bot_services_by_uid[bot_profile_id]
        service_dicts: List[Dict[str, Any]] = []
        if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
            service_dicts = [
                {
                    "base_url": service.base_url,
                    "interface": service.interface,
                    "token": service.token,
                }
                for service in services
            ]
        elif bot_type == UserProfile.EMBEDDED_BOT:
            # An embedded bot without a stored config gets an empty list,
            # matching get_service_dicts_for_bot's ConfigError fallback.
            if bot_profile_id in embedded_bot_configs:
                bot_config = embedded_bot_configs[bot_profile_id]
                service_dicts = [
                    {
                        "config_data": bot_config,
                        "service_name": services[0].name,
                    }
                ]
        service_dicts_by_uid[bot_profile_id] = service_dicts
    return service_dicts_by_uid
def get_owned_bot_dicts(
    user_profile: UserProfile, include_all_realm_bots_if_admin: bool = True
) -> List[Dict[str, Any]]:
    """Return API-shaped dicts for the bots visible to `user_profile`.

    Realm admins (by default) see every bot in the realm; other users see
    only bots they own.  Service data is attached via one bulk lookup.
    """
    if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
        result = get_bot_dicts_in_realm(user_profile.realm)
    else:
        result = UserProfile.objects.filter(
            realm=user_profile.realm, is_bot=True, bot_owner=user_profile
        ).values(*bot_dict_fields)
    services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
    return [
        {
            "email": botdict["email"],
            "user_id": botdict["id"],
            "full_name": botdict["full_name"],
            "bot_type": botdict["bot_type"],
            "is_active": botdict["is_active"],
            "api_key": botdict["api_key"],
            "default_sending_stream": botdict["default_sending_stream__name"],
            "default_events_register_stream": botdict["default_events_register_stream__name"],
            "default_all_public_streams": botdict["default_all_public_streams"],
            "owner_id": botdict["bot_owner_id"],
            "avatar_url": avatar_url_from_dict(botdict),
            "services": services_by_ids[botdict["id"]],
        }
        for botdict in result
    ]
def do_send_user_group_members_update_event(
    event_name: str, user_group: UserGroup, user_ids: List[int]
) -> None:
    # event_name is the op ("add_members" or "remove_members", per callers below).
    event = dict(type="user_group", op=event_name, group_id=user_group.id, user_ids=user_ids)
    send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None:
    """Add users to a user group with one bulk insert, then notify clients."""
    UserGroupMembership.objects.bulk_create(
        UserGroupMembership(user_group_id=user_group.id, user_profile=profile)
        for profile in user_profiles
    )
    member_ids = [profile.id for profile in user_profiles]
    do_send_user_group_members_update_event("add_members", user_group, member_ids)
def remove_members_from_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None:
    """Remove users from a user group, then notify clients."""
    memberships = UserGroupMembership.objects.filter(
        user_group_id=user_group.id, user_profile__in=user_profiles
    )
    memberships.delete()
    member_ids = [profile.id for profile in user_profiles]
    do_send_user_group_members_update_event("remove_members", user_group, member_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int, realm_id: int) -> None:
    # NOTE(review): `realm_id` appears to duplicate `realm.id` (the one caller
    # in view passes both) — kept to preserve the existing signature.
    event = dict(type="user_group", op="remove", group_id=user_group_id)
    send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
    """Delete a user group after verifying `user_profile` may access it
    (access_user_group_by_id raises if not), then notify clients."""
    user_group = access_user_group_by_id(user_group_id, user_profile)
    user_group.delete()
    do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
    """Email the realm's admins a confirmation link to reactivate the realm,
    recording the action in the audit log."""
    url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
    RealmAuditLog.objects.create(
        realm=realm,
        acting_user=acting_user,
        event_type=RealmAuditLog.REALM_REACTIVATION_EMAIL_SENT,
        event_time=timezone_now(),
    )
    context = {"confirmation_url": url, "realm_uri": realm.uri, "realm_name": realm.name}
    language = realm.default_language
    send_email_to_admins(
        "zerver/emails/realm_reactivation",
        realm,
        from_address=FromAddress.tokenized_no_reply_address(),
        from_name=FromAddress.security_email_from_name(language=language),
        language=language,
        context=context,
    )
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
    """Store (or clear, when `token` is None) the user's Zoom OAuth token,
    telling only that user whether a token is now present."""
    user.zoom_token = token
    user.save(update_fields=["zoom_token"])
    event = dict(type="has_zoom_token", value=token is not None)
    send_event(user.realm, event, [user.id])
def notify_realm_export(user_profile: UserProfile) -> None:
    """Send the user their current list of realm exports."""
    # In the future, we may want to send this event to all realm admins.
    event = dict(type="realm_export", exports=get_realm_exports_serialized(user_profile))
    send_event(user_profile.realm, event, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
    """Delete a realm export tarball, mark the audit-log row as deleted,
    and refresh the requesting user's export list."""
    # Give mypy a hint so it knows `orjson.loads`
    # isn't being passed an `Optional[str]`.
    export_extra_data = export.extra_data
    assert export_extra_data is not None
    export_data = orjson.loads(export_extra_data)
    export_path = export_data.get("export_path")
    if export_path:
        # Allow removal even if the export failed.
        delete_export_tarball(export_path)
    export_data.update(deleted_timestamp=timezone_now().timestamp())
    export.extra_data = orjson.dumps(export_data).decode()
    export.save(update_fields=["extra_data"])
    notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream, topic_name: str) -> List[Message]:
    """Return, in id order, the messages in (stream, topic) that
    `user_profile` received (i.e. has a UserMessage row for)."""
    query = UserMessage.objects.filter(
        user_profile=user_profile,
        message__recipient=stream.recipient,
    ).order_by("id")
    return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
| andersk/zulip | zerver/lib/actions.py | Python | apache-2.0 | 325,551 |
# Demo script: scipy.stats normal distributions (1-D and multivariate),
# sampling/plotting with matplotlib, and a quick FFT example.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy.stats import multivariate_normal as mvn
norm.pdf(0)
# mean: loc, stddev: scale
norm.pdf(0, loc=5, scale=10)
r = np.random.randn(10)
# probability distribution function:
norm.pdf(r)
# log probability:
norm.logpdf(r)
# cumulative distribution function:
norm.cdf(r)
# log cumulative distribution function:
norm.logcdf(r)
# sampling from standard normal:
r = np.random.randn(10000)
plt.hist(r, bins=100)
plt.show()
# shift/scale standard-normal samples to N(mean, stddev^2):
mean = 5
stddev = 10
r = stddev*np.random.randn(10000)+mean
plt.hist(r, bins=100)
plt.show()
# spherical Gaussian:
r = np.random.randn(10000, 2)
plt.scatter(r[:,0], r[:,1])
plt.show()
# elliptical Gaussian (rescale the second coordinate only):
r[:,1] = stddev*r[:,1]+mean
plt.scatter(r[:,0], r[:,1])
plt.axis('equal')
plt.show()
# non-axis-aligned Gaussian:
cov = np.array([[1,0.8],[0.8,3]]) # covariant matrix, covariance: 0.8 in both dimensions
mu = np.array([0,2])
r = mvn.rvs(mean=mu, cov=cov, size=1000)
plt.scatter(r[:,0], r[:,1])
plt.axis('equal')
plt.show()
# same thing via numpy directly:
r = np.random.multivariate_normal(mean=mu, cov=cov, size=1000)
plt.scatter(r[:,0], r[:,1])
plt.axis('equal')
plt.show()
# loading Matlab .mat files: scipy.io.loadmat
# loading WAV files: scipy.io.wavfile.read (default sampling: 44100 Hz)
# saving WAV files: scipy.io.wavfile.write
# signal processing, filtering: scipy.signal
# convolution: scipy.signal.convolve, scipy.signal.convolve2d
# FFT is in numpy:
x = np.linspace(0, 100, 10000)
y = np.sin(x) + np.sin(3*x) + np.sin(5*x)
plt.plot(x, y)
plt.show()
Y = np.fft.fft(y)
plt.plot(np.abs(Y))
plt.show()
# angular frequencies corresponding to FFT bins 16/48/80 over a length-100
# window — presumably meant to recover the 1, 3, 5 rad frequencies above:
2*np.pi*16/100
2*np.pi*48/100
2*np.pi*80/100
| balazssimon/ml-playground | udemy/lazyprogrammer/numpy-stack/scipy_test.py | Python | apache-2.0 | 1,730 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import platform
import shutil
import warnings
import gc
import itertools
import operator
import random
import pyspark.heapq3 as heapq
from pyspark.serializers import BatchedSerializer, PickleSerializer, FlattenedValuesSerializer, \
CompressedSerializer, AutoBatchedSerializer
try:
    import psutil
    process = None
    def get_used_memory():
        """ Return the used memory in MB """
        global process
        # NOTE(review): `_pid` is a private psutil attribute — works on the
        # psutil versions this targeted; modern psutil exposes `.pid`.
        if process is None or process._pid != os.getpid():
            process = psutil.Process(os.getpid())
        if hasattr(process, "memory_info"):
            info = process.memory_info()
        else:
            # older psutil releases used get_memory_info()
            info = process.get_memory_info()
        return info.rss >> 20  # bytes -> MB
except ImportError:
    def get_used_memory():
        """ Return the used memory in MB """
        if platform.system() == 'Linux':
            for line in open('/proc/self/status'):
                if line.startswith('VmRSS:'):
                    return int(line.split()[1]) >> 10  # kB -> MB
        else:
            warnings.warn("Please install psutil to have better "
                          "support with spilling")
            if platform.system() == "Darwin":
                import resource
                rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
                return rss >> 20  # bytes -> MB on macOS
            # TODO: support windows
        return 0
def _get_local_dirs(sub):
""" Get all the directories """
path = os.environ.get("SPARK_LOCAL_DIRS", "/tmp")
dirs = path.split(",")
if len(dirs) > 1:
# different order in different processes and instances
rnd = random.Random(os.getpid() + id(dirs))
random.shuffle(dirs, rnd.random)
return [os.path.join(d, "python", str(os.getpid()), sub) for d in dirs]
# Global spill statistics, incremented by the mergers/sorters below
# (see ExternalMerger._spill and ExternalSorter.sorted):
MemoryBytesSpilled = 0  # bytes of memory freed by spilling
DiskBytesSpilled = 0  # bytes written to disk while spilling
class Aggregator(object):
    """
    Aggregator has three functions to merge values into a combiner:
    createCombiner: (value) -> combiner
    mergeValue: (combiner, value) -> combiner
    mergeCombiners: (combiner, combiner) -> combiner
    """
    def __init__(self, createCombiner, mergeValue, mergeCombiners):
        self.createCombiner = createCombiner
        self.mergeValue = mergeValue
        self.mergeCombiners = mergeCombiners
class SimpleAggregator(Aggregator):
    """
    SimpleAggregator is useful for the cases that combiners have the
    same type as values: the value itself is the initial combiner, and
    `combiner` is used both to merge values and to merge combiners.
    """
    def __init__(self, combiner):
        Aggregator.__init__(self, lambda x: x, combiner, combiner)
class Merger(object):
    """
    Abstract base: merge shuffled (key, value) data together using an
    Aggregator.  Subclasses implement the three methods below.
    """
    def __init__(self, aggregator):
        self.agg = aggregator
    def mergeValues(self, iterator):
        """ Combine the items by creator and combiner """
        raise NotImplementedError
    def mergeCombiners(self, iterator):
        """ Merge the combined items by mergeCombiner """
        raise NotImplementedError
    def items(self):
        """ Return the merged items as iterator """
        raise NotImplementedError
def _compressed_serializer(serializer=None):
    """Return the serializer used for spill files: batched + compressed
    pickling.  The `serializer` argument is accepted (call sites pass one
    positionally) but deliberately ignored — see the comment below.

    Fix: this is a module-level function, yet its first parameter was
    named `self`, so callers' single positional argument silently bound
    to `self` and the real `serializer` parameter was never used.
    """
    # always use PickleSerializer to simplify implementation
    ser = PickleSerializer()
    return AutoBatchedSerializer(CompressedSerializer(ser))
class ExternalMerger(Merger):
    """
    External merger will dump the aggregated data into disks when
    memory usage goes above the limit, then merge them together.
    This class works as follows:
    - It repeatedly combine the items and save them in one dict in
      memory.
    - When the used memory goes above memory limit, it will split
      the combined data into partitions by hash code, dump them
      into disk, one file per partition.
    - Then it goes through the rest of the iterator, combine items
      into different dict by hash. Until the used memory goes over
      memory limit, it dump all the dicts into disks, one file per
      dict. Repeat this again until combine all the items.
    - Before return any items, it will load each partition and
      combine them separately. Yield them before loading next
      partition.
    - During loading a partition, if the memory goes over limit,
      it will partition the loaded data and dump them into disks
      and load them partition by partition again.
    `data` and `pdata` are used to hold the merged items in memory.
    At first, all the data are merged into `data`. Once the used
    memory goes over limit, the items in `data` are dumped into
    disks, `data` will be cleared, all rest of items will be merged
    into `pdata` and then dumped into disks. Before returning, all
    the items in `pdata` will be dumped into disks.
    Finally, if any items were spilled into disks, each partition
    will be merged into `data` and be yielded, then cleared.
    >>> agg = SimpleAggregator(lambda x, y: x + y)
    >>> merger = ExternalMerger(agg, 10)
    >>> N = 10000
    >>> merger.mergeValues(zip(range(N), range(N)))
    >>> assert merger.spills > 0
    >>> sum(v for k,v in merger.items())
    49995000
    >>> merger = ExternalMerger(agg, 10)
    >>> merger.mergeCombiners(zip(range(N), range(N)))
    >>> assert merger.spills > 0
    >>> sum(v for k,v in merger.items())
    49995000
    """
    # the max total partitions created recursively
    MAX_TOTAL_PARTITIONS = 4096
    def __init__(self, aggregator, memory_limit=512, serializer=None,
                 localdirs=None, scale=1, partitions=59, batch=1000):
        # memory_limit is in MB (compared against get_used_memory()).
        Merger.__init__(self, aggregator)
        self.memory_limit = memory_limit
        self.serializer = _compressed_serializer(serializer)
        self.localdirs = localdirs or _get_local_dirs(str(id(self)))
        # number of partitions when spill data into disks
        self.partitions = partitions
        # check the memory after # of items merged
        self.batch = batch
        # scale is used to scale down the hash of key for recursive hash map
        self.scale = scale
        # un-partitioned merged data
        self.data = {}
        # partitioned merged data, list of dicts
        self.pdata = []
        # number of chunks dumped into disks
        self.spills = 0
        # randomize the hash of key, id(o) is the address of o (aligned by 8)
        self._seed = id(self) + 7
    def _get_spill_dir(self, n):
        """ Choose one directory for spill by number n """
        return os.path.join(self.localdirs[n % len(self.localdirs)], str(n))
    def _next_limit(self):
        """
        Return the next memory limit. If the memory is not released
        after spilling, it will dump the data only when the used memory
        starts to increase.
        """
        return max(self.memory_limit, get_used_memory() * 1.05)
    def mergeValues(self, iterator):
        """ Combine the items by creator and combiner """
        # speedup attribute lookup
        creator, comb = self.agg.createCombiner, self.agg.mergeValue
        c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch
        limit = self.memory_limit
        for k, v in iterator:
            d = pdata[hfun(k)] if pdata else data
            d[k] = comb(d[k], v) if k in d else creator(v)
            c += 1
            if c >= batch:
                if get_used_memory() >= limit:
                    self._spill()
                    limit = self._next_limit()
                    # shrink/grow the check interval adaptively; note this
                    # is true division, so batch becomes a float — harmless
                    # for the `c >= batch` comparison above.
                    batch /= 2
                    c = 0
                else:
                    batch *= 1.5
        if get_used_memory() >= limit:
            self._spill()
    def _partition(self, key):
        """ Return the partition for key """
        return hash((key, self._seed)) % self.partitions
    def _object_size(self, obj):
        """ How much of memory for this obj, assume that all the objects
        consume similar bytes of memory
        """
        return 1
    def mergeCombiners(self, iterator, limit=None):
        """ Merge (K,V) pair by mergeCombiner """
        if limit is None:
            limit = self.memory_limit
        # speedup attribute lookup
        comb, hfun, objsize = self.agg.mergeCombiners, self._partition, self._object_size
        c, data, pdata, batch = 0, self.data, self.pdata, self.batch
        for k, v in iterator:
            d = pdata[hfun(k)] if pdata else data
            d[k] = comb(d[k], v) if k in d else v
            if not limit:
                # limit == 0 disables spilling (used when reloading spills)
                continue
            c += objsize(v)
            if c > batch:
                if get_used_memory() > limit:
                    self._spill()
                    limit = self._next_limit()
                    batch /= 2
                    c = 0
                else:
                    batch *= 1.5
        if limit and get_used_memory() >= limit:
            self._spill()
    def _spill(self):
        """
        dump already partitioned data into disks.
        It will dump the data in batch for better performance.
        """
        global MemoryBytesSpilled, DiskBytesSpilled
        path = self._get_spill_dir(self.spills)
        if not os.path.exists(path):
            os.makedirs(path)
        used_memory = get_used_memory()
        if not self.pdata:
            # The data has not been partitioned, it will iterator the
            # dataset once, write them into different files, has no
            # additional memory. It only called when the memory goes
            # above limit at the first time.
            # open all the files for writing
            streams = [open(os.path.join(path, str(i)), 'wb')
                       for i in range(self.partitions)]
            for k, v in self.data.items():
                h = self._partition(k)
                # put one item in batch, make it compatible with load_stream
                # it will increase the memory if dump them in batch
                self.serializer.dump_stream([(k, v)], streams[h])
            for s in streams:
                DiskBytesSpilled += s.tell()
                s.close()
            self.data.clear()
            # from now on, incoming items are partitioned directly into pdata
            self.pdata.extend([{} for i in range(self.partitions)])
        else:
            for i in range(self.partitions):
                p = os.path.join(path, str(i))
                with open(p, "wb") as f:
                    # dump items in batch
                    self.serializer.dump_stream(iter(self.pdata[i].items()), f)
                self.pdata[i].clear()
                DiskBytesSpilled += os.path.getsize(p)
        self.spills += 1
        gc.collect()  # release the memory as much as possible
        MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
    def items(self):
        """ Return all merged items as iterator """
        if not self.pdata and not self.spills:
            # everything fit in memory; no disk merge needed
            return iter(self.data.items())
        return self._external_items()
    def _external_items(self):
        """ Return all partitioned items as iterator """
        assert not self.data
        if any(self.pdata):
            self._spill()
        # disable partitioning and spilling when merge combiners from disk
        self.pdata = []
        try:
            for i in range(self.partitions):
                for v in self._merged_items(i):
                    yield v
                self.data.clear()
                # remove the merged partition
                for j in range(self.spills):
                    path = self._get_spill_dir(j)
                    os.remove(os.path.join(path, str(i)))
        finally:
            self._cleanup()
    def _merged_items(self, index):
        """ Load partition `index` from every spill file and merge it. """
        self.data = {}
        limit = self._next_limit()
        for j in range(self.spills):
            path = self._get_spill_dir(j)
            p = os.path.join(path, str(index))
            # do not check memory during merging
            with open(p, "rb") as f:
                self.mergeCombiners(self.serializer.load_stream(f), 0)
            # limit the total partitions
            if (self.scale * self.partitions < self.MAX_TOTAL_PARTITIONS
                    and j < self.spills - 1
                    and get_used_memory() > limit):
                self.data.clear()  # will read from disk again
                gc.collect()  # release the memory as much as possible
                return self._recursive_merged_items(index)
        return self.data.items()
    def _recursive_merged_items(self, index):
        """
        merge the partitioned items and return the as iterator
        If one partition can not be fit in memory, then them will be
        partitioned and merged recursively.
        """
        subdirs = [os.path.join(d, "parts", str(index)) for d in self.localdirs]
        m = ExternalMerger(self.agg, self.memory_limit, self.serializer, subdirs,
                           self.scale * self.partitions, self.partitions, self.batch)
        m.pdata = [{} for _ in range(self.partitions)]
        limit = self._next_limit()
        for j in range(self.spills):
            path = self._get_spill_dir(j)
            p = os.path.join(path, str(index))
            with open(p, 'rb') as f:
                m.mergeCombiners(self.serializer.load_stream(f), 0)
            if get_used_memory() > limit:
                m._spill()
                limit = self._next_limit()
        return m._external_items()
    def _cleanup(self):
        """ Clean up all the files in disks """
        for d in self.localdirs:
            shutil.rmtree(d, True)
class ExternalSorter(object):
    """
    ExternalSorter will divide the elements into chunks, sort them in
    memory and dump them into disks, finally merge them back.
    The spilling will only happen when the used memory goes above
    the limit.
    >>> sorter = ExternalSorter(1)  # 1M
    >>> import random
    >>> l = list(range(1024))
    >>> random.shuffle(l)
    >>> sorted(l) == list(sorter.sorted(l))
    True
    >>> sorted(l) == list(sorter.sorted(l, key=lambda x: -x, reverse=True))
    True
    """
    def __init__(self, memory_limit, serializer=None):
        # memory_limit is in MB, compared against get_used_memory().
        self.memory_limit = memory_limit
        self.local_dirs = _get_local_dirs("sort")
        self.serializer = _compressed_serializer(serializer)
    def _get_path(self, n):
        """ Choose one directory for spill by number n """
        d = self.local_dirs[n % len(self.local_dirs)]
        if not os.path.exists(d):
            os.makedirs(d)
        return os.path.join(d, str(n))
    def _next_limit(self):
        """
        Return the next memory limit. If the memory is not released
        after spilling, it will dump the data only when the used memory
        starts to increase.
        """
        return max(self.memory_limit, get_used_memory() * 1.05)
    def sorted(self, iterator, key=None, reverse=False):
        """
        Sort the elements in iterator, do external sort when the memory
        goes above the limit.
        """
        global MemoryBytesSpilled, DiskBytesSpilled
        batch, limit = 100, self._next_limit()
        chunks, current_chunk = [], []
        iterator = iter(iterator)
        while True:
            # pick elements in batch
            chunk = list(itertools.islice(iterator, batch))
            current_chunk.extend(chunk)
            if len(chunk) < batch:
                break
            used_memory = get_used_memory()
            if used_memory > limit:
                # sort them inplace will save memory
                current_chunk.sort(key=key, reverse=reverse)
                path = self._get_path(len(chunks))
                with open(path, 'wb') as f:
                    self.serializer.dump_stream(current_chunk, f)
                def load(f):
                    # lazily re-read one sorted chunk from disk
                    for v in self.serializer.load_stream(f):
                        yield v
                    # close the file explicit once we consume all the items
                    # to avoid ResourceWarning in Python3
                    f.close()
                chunks.append(load(open(path, 'rb')))
                current_chunk = []
                MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
                DiskBytesSpilled += os.path.getsize(path)
                os.unlink(path)  # data will be deleted after close
            elif not chunks:
                batch = min(int(batch * 1.5), 10000)
        current_chunk.sort(key=key, reverse=reverse)
        if not chunks:
            return current_chunk
        if current_chunk:
            chunks.append(iter(current_chunk))
        # pyspark.heapq3.merge accepts a list of sorted iterables plus
        # key/reverse (unlike the stdlib heapq.merge signature).
        return heapq.merge(chunks, key=key, reverse=reverse)
class ExternalList(object):
    """
    ExternalList can have many items which cannot be hold in memory in
    the same time.
    >>> l = ExternalList(list(range(100)))
    >>> len(l)
    100
    >>> l.append(10)
    >>> len(l)
    101
    >>> for i in range(20240):
    ...     l.append(i)
    >>> len(l)
    20341
    >>> import pickle
    >>> l2 = pickle.loads(pickle.dumps(l))
    >>> len(l2)
    20341
    >>> list(l2)[100]
    10
    """
    # spill the in-memory tail to disk once it reaches this many items
    LIMIT = 10240
    def __init__(self, values):
        self.values = values
        self.count = len(values)
        self._file = None
        self._ser = None
    def __getstate__(self):
        # Pickle as (in-memory tail, total count, raw bytes of the spill file).
        if self._file is not None:
            self._file.flush()
            with os.fdopen(os.dup(self._file.fileno()), "rb") as f:
                f.seek(0)
                serialized = f.read()
        else:
            serialized = b''
        return self.values, self.count, serialized
    def __setstate__(self, item):
        self.values, self.count, serialized = item
        if serialized:
            self._open_file()
            self._file.write(serialized)
        else:
            self._file = None
            self._ser = None
    def __iter__(self):
        if self._file is not None:
            self._file.flush()
            # read all items from disks first
            with os.fdopen(os.dup(self._file.fileno()), 'rb') as f:
                f.seek(0)
                for v in self._ser.load_stream(f):
                    yield v
        for v in self.values:
            yield v
    def __len__(self):
        return self.count
    def append(self, value):
        self.values.append(value)
        self.count += 1
        # dump them into disk if the key is huge
        if len(self.values) >= self.LIMIT:
            self._spill()
    def _open_file(self):
        dirs = _get_local_dirs("objects")
        d = dirs[id(self) % len(dirs)]
        if not os.path.exists(d):
            os.makedirs(d)
        p = os.path.join(d, str(id(self)))
        self._file = open(p, "w+b", 65536)
        self._ser = BatchedSerializer(CompressedSerializer(PickleSerializer()), 1024)
        # POSIX-style trick: unlinking the open file keeps it usable via the
        # fd but guarantees cleanup on close — presumably not Windows-safe.
        os.unlink(p)
    def __del__(self):
        if self._file:
            self._file.close()
            self._file = None
    def _spill(self):
        """ dump the values into disk """
        global MemoryBytesSpilled, DiskBytesSpilled
        if self._file is None:
            self._open_file()
        used_memory = get_used_memory()
        pos = self._file.tell()
        self._ser.dump_stream(self.values, self._file)
        self.values = []
        gc.collect()
        DiskBytesSpilled += self._file.tell() - pos
        MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
class ExternalListOfList(ExternalList):
    """
    An external list whose elements are themselves sequences; ``len`` and
    iteration operate on the flattened items rather than the sublists.
    >>> l = ExternalListOfList([[i, i] for i in range(100)])
    >>> len(l)
    200
    >>> l.append(range(10))
    >>> len(l)
    210
    >>> len(list(l))
    210
    """
    def __init__(self, values):
        ExternalList.__init__(self, values)
        # Track the flattened item count, not the number of sublists.
        self.count = sum(len(sub) for sub in values)

    def append(self, value):
        ExternalList.append(self, value)
        # The base class already counted the sublist itself as one item.
        self.count += len(value) - 1

    def __iter__(self):
        # Flatten one level while streaming from the base-class iterator.
        return itertools.chain.from_iterable(ExternalList.__iter__(self))
class GroupByKey(object):
    """
    Group a sorted iterator as [(k1, it1), (k2, it2), ...]
    >>> k = [i // 3 for i in range(6)]
    >>> v = [[i] for i in range(6)]
    >>> g = GroupByKey(zip(k, v))
    >>> [(k, list(it)) for k, it in g]
    [(0, [0, 1, 2]), (1, [3, 4, 5])]
    """
    def __init__(self, iterator):
        # Must already be sorted by key: only consecutive runs of equal
        # keys are grouped together.
        self.iterator = iterator

    def __iter__(self):
        current_key = None
        group = None
        for k, v in self.iterator:
            if group is not None and k == current_key:
                group.append(v)
                continue
            # Key changed (or first item): flush the previous group and
            # start a new disk-backed one.
            if group is not None:
                yield (current_key, group)
            current_key, group = k, ExternalListOfList([v])
        if group is not None:
            yield (current_key, group)
class ExternalGroupBy(ExternalMerger):
    """
    Group by the items by key. If any partition of them can not been
    hold in memory, it will do sort based group by.
    This class works as follows:
    - It repeatedly group the items by key and save them in one dict in
      memory.
    - When the used memory goes above memory limit, it will split
      the combined data into partitions by hash code, dump them
      into disk, one file per partition. If the number of keys
      in one partitions is smaller than 1000, it will sort them
      by key before dumping into disk.
    - Then it goes through the rest of the iterator, group items
      by key into different dict by hash. Until the used memory goes over
      memory limit, it dump all the dicts into disks, one file per
      dict. Repeat this again until combine all the items. It
      also will try to sort the items by key in each partition
      before dumping into disks.
    - It will yield the grouped items partitions by partitions.
      If the data in one partitions can be hold in memory, then it
      will load and combine them in memory and yield.
    - If the dataset in one partition cannot be hold in memory,
      it will sort them first. If all the files are already sorted,
      it merge them by heap.merge(), so it will do external sort
      for all the files.
    - After sorting, `GroupByKey` class will put all the continuous
      items with the same key as a group, yield the values as
      an iterator.
    """
    # If a spill has fewer keys than this, sort it before writing so the
    # later merge can be a cheap streaming heap-merge.
    SORT_KEY_LIMIT = 1000
    def flattened_serializer(self):
        # Wrap the batched serializer so grouped values are written as
        # flat batches (of up to 20 values) instead of one huge list.
        assert isinstance(self.serializer, BatchedSerializer)
        ser = self.serializer
        return FlattenedValuesSerializer(ser, 20)
    def _object_size(self, obj):
        # For group-by, the "size" of a combined value is its length
        # (number of grouped values), not a byte estimate.
        return len(obj)
    def _spill(self):
        """
        dump already partitioned data into disks.
        """
        global MemoryBytesSpilled, DiskBytesSpilled
        path = self._get_spill_dir(self.spills)
        if not os.path.exists(path):
            os.makedirs(path)
        used_memory = get_used_memory()
        if not self.pdata:
            # The data has not been partitioned, it will iterator the
            # data once, write them into different files, has no
            # additional memory. It only called when the memory goes
            # above limit at the first time.
            # open all the files for writing
            streams = [open(os.path.join(path, str(i)), 'wb')
                       for i in range(self.partitions)]
            # If the number of keys is small, then the overhead of sort is small
            # sort them before dumping into disks
            self._sorted = len(self.data) < self.SORT_KEY_LIMIT
            if self._sorted:
                self.serializer = self.flattened_serializer()
                for k in sorted(self.data.keys()):
                    h = self._partition(k)
                    self.serializer.dump_stream([(k, self.data[k])], streams[h])
            else:
                for k, v in self.data.items():
                    h = self._partition(k)
                    self.serializer.dump_stream([(k, v)], streams[h])
            for s in streams:
                DiskBytesSpilled += s.tell()
                s.close()
            self.data.clear()
            # self.pdata is cached in `mergeValues` and `mergeCombiners`
            self.pdata.extend([{} for i in range(self.partitions)])
        else:
            for i in range(self.partitions):
                p = os.path.join(path, str(i))
                with open(p, "wb") as f:
                    # dump items in batch
                    if self._sorted:
                        # sort by key only (stable)
                        sorted_items = sorted(self.pdata[i].items(), key=operator.itemgetter(0))
                        self.serializer.dump_stream(sorted_items, f)
                    else:
                        self.serializer.dump_stream(self.pdata[i].items(), f)
                self.pdata[i].clear()
                DiskBytesSpilled += os.path.getsize(p)
        self.spills += 1
        gc.collect()  # release the memory as much as possible
        MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
    def _merged_items(self, index):
        # Total on-disk size of this partition across all spills.
        size = sum(os.path.getsize(os.path.join(self._get_spill_dir(j), str(index)))
                   for j in range(self.spills))
        # if the memory can not hold all the partition,
        # then use sort based merge. Because of compression,
        # the data on disks will be much smaller than needed memory
        if size >= self.memory_limit << 17:  # * 1M / 8
            return self._merge_sorted_items(index)
        self.data = {}
        for j in range(self.spills):
            path = self._get_spill_dir(j)
            p = os.path.join(path, str(index))
            # do not check memory during merging
            with open(p, "rb") as f:
                self.mergeCombiners(self.serializer.load_stream(f), 0)
        return self.data.items()
    def _merge_sorted_items(self, index):
        """ load a partition from disk, then sort and group by key """
        def load_partition(j):
            # Stream (key, value) pairs of this partition from spill j.
            path = self._get_spill_dir(j)
            p = os.path.join(path, str(index))
            with open(p, 'rb', 65536) as f:
                for v in self.serializer.load_stream(f):
                    yield v
        disk_items = [load_partition(j) for j in range(self.spills)]
        if self._sorted:
            # all the partitions are already sorted
            # NOTE(review): `heapq` here is passed a *list* of iterators;
            # this assumes the module imports Spark's vendored heapq
            # (pyspark.heapq3) whose merge() accepts a list -- the stdlib
            # heapq.merge takes *iterables.  TODO confirm the import.
            sorted_items = heapq.merge(disk_items, key=operator.itemgetter(0))
        else:
            # Flatten the combined values, so it will not consume huge
            # memory during merging sort.
            ser = self.flattened_serializer()
            sorter = ExternalSorter(self.memory_limit, ser)
            sorted_items = sorter.sorted(itertools.chain(*disk_items),
                                         key=operator.itemgetter(0))
        return ((k, vs) for k, vs in GroupByKey(sorted_items))
if __name__ == "__main__":
    # Run this module's doctests (ExternalList, GroupByKey, ...) when the
    # file is executed directly.
    import doctest
    import sys

    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        # Use sys.exit rather than the site-module exit() builtin, which is
        # intended for interactive use and may not be available.
        sys.exit(-1)
| wangyixiaohuihui/spark2-annotation | python/pyspark/shuffle.py | Python | apache-2.0 | 28,525 |
# -*- coding:utf-8 -*-
from werkzeug.routing import Map, Submount
import libcloud
from libcloud_rest.api.handlers import app_handler
from libcloud_rest.api.handlers.compute import compute_handler
from libcloud_rest.api.handlers.dns import dns_handler
from libcloud_rest.api.handlers.loadbalancer import lb_handler
from libcloud_rest.api.handlers.storage import storage_handler
from libcloud_rest.api.versions import versions
# URL prefix for the REST API, derived from the installed libcloud
# version (e.g. '/0.1' -- see libcloud_rest.api.versions).
api_version = '/%s' % (versions[libcloud.__version__])

# Werkzeug URL map: application-level rules at the root, and every
# service handler (compute, dns, loadbalancer, storage) mounted under
# the version prefix.
urls = Map([
    app_handler.get_rules(),
    Submount(api_version, [
        compute_handler.get_rules(),
        dns_handler.get_rules(),
        lb_handler.get_rules(),
        storage_handler.get_rules(),
    ])
])
| islamgulov/libcloud.rest | libcloud_rest/api/urls.py | Python | apache-2.0 | 704 |
import zstackwoodpecker.operations.scheduler_operations as sch_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.tag_operations as tag_ops
import zstackwoodpecker.operations.backupstorage_operations as bs_ops
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import time
import os
# Scheduler job type names used by create_scheduler_job / job groups.
vmBackup = 'vmBackup'
volumeBackup = 'volumeBackup'
# Shared woodpecker helpers and the dict that tracks created resources
# for cleanup.
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
# Module-level handles so error_cleanup() can delete whatever test()
# managed to create before failing.
# NOTE(review): trigger2 is declared for symmetry but never assigned by
# test(); confirm whether a second trigger was intended.
job1 = None
job2 = None
job_group = None
trigger1 = None
trigger2 = None
def test():
    """Verify scheduler backup job groups react to backup-storage tags.

    Flow:
      1. Require an imagestore backup storage and ensure it carries the
         "allowbackup" system tag (creating the tag if missing).
      2. Ensure a "remotebackup" imagestore exists, adding one on a
         connected, enabled host when necessary.
      3. Create two VMs plus a data volume and one vmBackup job per VM.
      4. Group the jobs, verify membership counts, attach a cron trigger.
      5. Delete the "allowbackup" tag and assert the job group is
         automatically Disabled.
    """
    global job1
    global job2
    global job_group
    global trigger1
    global trigger2
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    cond = res_ops.gen_query_conditions("tag", '=', "allowbackup")
    allow_backup_tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
    if not allow_backup_tags:
        tag_ops.create_system_tag(resourceType="ImageStoreBackupStorageVO",
                                  resourceUuid=imagestore.uuid,
                                  tag="allowbackup")
        # BUG FIX: re-query so allow_backup_tags[0] is valid below; the
        # original left the list empty here, so the final
        # delete_tag(allow_backup_tags[0].uuid) raised IndexError whenever
        # the tag had to be created by this test run.
        allow_backup_tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
    cond = res_ops.gen_query_conditions("tag", '=', "remotebackup")
    tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
    if not tags:
        cond = res_ops.gen_query_conditions("state", '=', "Enabled")
        # BUG FIX: chain onto the previous condition; rebuilding `cond`
        # from scratch silently dropped the state=Enabled filter.
        cond = res_ops.gen_query_conditions("status", '=', "Connected", cond)
        hosts = res_ops.query_resource(res_ops.HOST, cond)
        if not hosts:
            test_util.test_fail("No host available for adding imagestore for backup test")
        host = hosts[0]
        bs_option = test_util.ImageStoreBackupStorageOption()
        bs_option.set_name("remote_bs")
        bs_option.set_url("/home/sftpBackupStorage")
        bs_option.set_hostname(host.managementIp)
        bs_option.set_password('password')
        bs_option.set_sshPort(host.sshPort)
        bs_option.set_username(host.username)
        bs_option.set_system_tags(["remotebackup"])
        bs_inv = bs_ops.create_image_store_backup_storage(bs_option)
        bs_ops.attach_backup_storage(bs_inv.uuid, host.zoneUuid)
        remote_uuid = bs_inv.uuid
    else:
        remote_uuid = tags[0].resourceUuid
    # Two VMs on the same L3 network; vm2 additionally carries a data volume.
    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    volume = test_stub.create_volume()
    volume.attach(vm2)
    test_obj_dict.add_vm(vm1)
    test_obj_dict.add_vm(vm2)
    test_obj_dict.add_volume(volume)
    # Shared backup parameters for both jobs and the job group.
    parameters = {'retentionType': 'Count',
                  'retentionValue': '10',
                  'backupStorageUuids': imagestore.uuid,
                  'remoteBackupStorageUuid': remote_uuid,
                  'networkWriteBandwidth': '',
                  'networkReadBandwidth': '',
                  'volumeReadBandwidth': '',
                  'fullBackupTriggerUuid': '',
                  'volumeWriteBandwidth': ''}
    test_util.test_logger(parameters)
    job1 = sch_ops.create_scheduler_job(name='vm1', description='vm1 backup',
                                        target_uuid=vm1.get_vm().rootVolumeUuid,
                                        type=vmBackup, parameters=parameters)
    job2 = sch_ops.create_scheduler_job(name='vm2', description='vm2 backup',
                                        target_uuid=vm2.get_vm().rootVolumeUuid,
                                        type=vmBackup,
                                        parameters=parameters)
    name1 = 'job_group'
    job_group = sch_ops.create_scheduler_job_group(name=name1,
                                                   description='vmbackup',
                                                   type=vmBackup,
                                                   parameters=parameters)
    cond = res_ops.gen_query_conditions('uuid', '=', job_group.uuid)
    # Membership count must grow as each job is added.
    sch_ops.add_jobs_to_job_group([job1.uuid], job_group.uuid)
    job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
    assert len(job_group_inv.jobsUuid) == 1
    sch_ops.add_jobs_to_job_group([job2.uuid], job_group.uuid)
    job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
    assert len(job_group_inv.jobsUuid) == 2
    trigger1 = sch_ops.create_scheduler_trigger('10min',
                                                start_time=int(time.time() + 5),
                                                type='cron',
                                                cron='*0 0/10 * * * ?')
    sch_ops.add_scheduler_job_group_to_trigger(trigger1.uuid, job_group.uuid)
    job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
    assert len(job_group_inv.triggersUuid) == 1
    # Removing the allowbackup tag must auto-disable the job group.
    tag_ops.delete_tag(allow_backup_tags[0].uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', job_group.uuid)
    job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
    assert job_group_inv.state == "Disabled"
    test_lib.lib_robot_cleanup(test_obj_dict)
    sch_ops.del_scheduler_job(job1.uuid)
    sch_ops.del_scheduler_job(job2.uuid)
    sch_ops.del_scheduler_job_group(job_group.uuid)
    sch_ops.del_scheduler_trigger(trigger1.uuid)
def error_cleanup():
    """Best-effort teardown used when test() aborts part-way through."""
    global job1, job2, job_group, trigger1, trigger2
    test_lib.lib_error_cleanup(test_obj_dict)
    # Delete whichever scheduler artifacts were actually created, in the
    # same order as the original cleanup.
    for job in (job1, job2):
        if job:
            sch_ops.del_scheduler_job(job.uuid)
    if job_group:
        sch_ops.del_scheduler_job_group(job_group.uuid)
    for trigger in (trigger1, trigger2):
        if trigger:
            sch_ops.del_scheduler_trigger(trigger.uuid)
| zstackio/zstack-woodpecker | integrationtest/vm/virtualrouter/scheduler/test_delete_local_backupstorage.py | Python | apache-2.0 | 5,456 |
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from functools import wraps
import yaml
import os
import requests
import time
from pyvcloud import vcloudair
from pyvcloud.schema.vcd.v1_5.schemas.vcloud import taskType
from cloudify import ctx
from cloudify import context
from cloudify import exceptions as cfy_exc
# Polling / retry tuning (seconds, attempts).
TASK_RECHECK_TIMEOUT = 3
RELOGIN_TIMEOUT = 3
LOGIN_RETRY_NUM = 5
# Terminal task states reported by the vCloud task API.
TASK_STATUS_SUCCESS = 'success'
TASK_STATUS_ERROR = 'error'
# Numeric vApp/VM status codes (see VCLOUD_STATUS_MAP for labels).
STATUS_COULD_NOT_BE_CREATED = -1
STATUS_UNRESOLVED = 0
STATUS_RESOLVED = 1
STATUS_DEPLOYED = 2
STATUS_SUSPENDED = 3
STATUS_POWERED_ON = 4
STATUS_POWERED_OFF = 8
STATUS_WAITING_FOR_USER_INPUT = 5
STATUS_UNKNOWN_STATE = 6
STATUS_UNRECOGNIZED_STATE = 7
STATUS_INCONSISTENT_STATE = 9
# Human-readable labels for the status codes above.
VCLOUD_STATUS_MAP = {
    -1: "Could not be created",
    0: "Unresolved",
    1: "Resolved",
    2: "Deployed",
    3: "Suspended",
    4: "Powered on",
    5: "Waiting for user input",
    6: "Unknown state",
    7: "Unrecognized state",
    8: "Powered off",
    9: "Inconsistent state",
    10: "Children do not all have the same status",
    11: "Upload initiated, OVF descriptor pending",
    12: "Upload initiated, copying contents",
    13: "Upload initiated , disk contents pending",
    14: "Upload has been quarantined",
    15: "Upload quarantine period has expired"
}
# Supported vCloud service types (config key 'service_type').
SUBSCRIPTION_SERVICE_TYPE = 'subscription'
ONDEMAND_SERVICE_TYPE = 'ondemand'
PRIVATE_SERVICE_TYPE = 'vcd'
# Keys used in config dicts / runtime properties.
SESSION_TOKEN = 'session_token'
ORG_URL = 'org_url'
VCLOUD_CONFIG = 'vcloud_config'
def transform_resource_name(res, ctx):
    """
    Return the resource name prefixed with the bootstrap-context
    resources prefix; accepts either a plain name or a dict with 'name'.
    """
    if isinstance(res, basestring):
        res = {'name': res}
    if not isinstance(res, dict):
        raise ValueError("transform_resource_name() expects either string or "
                         "dict as the first parameter")
    pfx = ctx.bootstrap_context.resources_prefix
    name = get_mandatory(res, 'name')
    # No prefix configured: the name passes through unchanged.
    if not pfx:
        return name
    prefixed = pfx + name
    res['name'] = prefixed
    if name.startswith(pfx):
        # Prefix is applied anyway (matching historic behaviour) but the
        # double prefix is worth a warning.
        ctx.logger.warn("Prefixing resource '{0}' with '{1}' but it "
                        "already has this prefix".format(name, pfx))
    else:
        ctx.logger.info("Transformed resource name '{0}' to '{1}'".format(
            name, prefixed))
    return prefixed
class Config(object):
    """
    Loads the plugin-wide configuration from a YAML file.

    The path comes from the VCLOUD_CONFIG_PATH environment variable,
    falling back to ~/vcloud_config.yaml.
    """
    VCLOUD_CONFIG_PATH_ENV_VAR = 'VCLOUD_CONFIG_PATH'
    VCLOUD_CONFIG_PATH_DEFAULT = '~/vcloud_config.yaml'

    def get(self):
        """
        Return the configuration dict, or {} when the file is missing,
        unreadable or empty.
        """
        cfg = {}
        env_name = self.VCLOUD_CONFIG_PATH_ENV_VAR
        default_location_tpl = self.VCLOUD_CONFIG_PATH_DEFAULT
        default_location = os.path.expanduser(default_location_tpl)
        config_path = os.getenv(env_name, default_location)
        try:
            with open(config_path) as f:
                # safe_load instead of load: the config only needs plain
                # YAML types, and yaml.load without an explicit Loader can
                # construct arbitrary Python objects from the file.
                cfg = yaml.safe_load(f.read()) or {}
        except IOError:
            # A missing config file is not an error: callers merge this
            # dict with node properties, so an empty dict is fine.
            pass
        return cfg
class VcloudAirClient(object):
    # Factory for logged-in pyvcloud ``VCA`` clients.  Supports the three
    # service types (subscription / ondemand / private "vcd") and three
    # credential styles: restored session token, auth token, or password.
    config = Config
    def get(self, config=None, *args, **kw):
        """
        return new vca client
        """
        # Static file-based config is the base; the caller-provided dict
        # (typically node properties) overrides it key by key.
        static_config = self.__class__.config().get()
        cfg = {}
        cfg.update(static_config)
        if config:
            cfg.update(config)
        return self.connect(cfg)
    def connect(self, cfg):
        """
        login to instance described in settings
        """
        url = cfg.get('url')
        username = cfg.get('username')
        password = cfg.get('password')
        token = cfg.get('token')
        service = cfg.get('service')
        org_name = cfg.get('org')
        service_type = cfg.get('service_type', SUBSCRIPTION_SERVICE_TYPE)
        instance = cfg.get('instance')
        org_url = cfg.get(ORG_URL, None)
        api_version = cfg.get('api_version', '5.6')
        session_token = cfg.get(SESSION_TOKEN)
        # NOTE(review): duplicate of the org_url assignment above --
        # harmless, but one of the two could be removed.
        org_url = cfg.get(ORG_URL)
        # Accept any one of: url+token, url+username+password, or a
        # restorable session token.
        if not (all([url, token]) or all([url, username, password]) or session_token):
            raise cfy_exc.NonRecoverableError(
                "Login credentials must be specified.")
        if (service_type == SUBSCRIPTION_SERVICE_TYPE and not (
                service and org_name
        )):
            raise cfy_exc.NonRecoverableError(
                "vCloud service and vDC must be specified")
        if service_type == SUBSCRIPTION_SERVICE_TYPE:
            vcloud_air = self._subscription_login(
                url, username, password, token, service, org_name,
                session_token, org_url)
        elif service_type == ONDEMAND_SERVICE_TYPE:
            vcloud_air = self._ondemand_login(
                url, username, password, token, instance,
                session_token, org_url)
        # The actual service type for private is 'vcd', but we should accept
        # 'private' as well, for user friendliness of inputs
        elif service_type in (PRIVATE_SERVICE_TYPE, 'private'):
            vcloud_air = self._private_login(
                url, username, password, token, org_name, org_url, api_version)
        else:
            raise cfy_exc.NonRecoverableError(
                "Unrecognized service type: {0}".format(service_type))
        return vcloud_air
    def _subscription_login(self, url, username, password, token, service,
                            org_name, session_token=None, org_url=None):
        """
        login to subscription service
        """
        version = '5.6'
        logined = False
        vdc_logined = False
        vca = vcloudair.VCA(
            url, username, service_type=SUBSCRIPTION_SERVICE_TYPE,
            version=version)
        # A session token short-circuits the whole flow: either it
        # restores a session or it is considered invalid.
        if session_token:
            if session_login(vca, org_url, session_token, version):
                return vca
            else:
                raise cfy_exc.NonRecoverableError("Invalid session credentials")
        # login with token
        if token:
            for _ in range(LOGIN_RETRY_NUM):
                logined = vca.login(token=token)
                if logined is False:
                    ctx.logger.info("Login using token failed.")
                    time.sleep(RELOGIN_TIMEOUT)
                    continue
                else:
                    ctx.logger.info("Login using token successful.")
                    break
        # outdated token, try login by password
        if logined is False and password:
            for _ in range(LOGIN_RETRY_NUM):
                logined = vca.login(password)
                if logined is False:
                    ctx.logger.info("Login using password failed. Retrying...")
                    time.sleep(RELOGIN_TIMEOUT)
                    continue
                else:
                    ctx.logger.info("Login using password successful.")
                    break
        # can't login to system at all
        if logined is False:
            raise cfy_exc.NonRecoverableError("Invalid login credentials")
        for _ in range(LOGIN_RETRY_NUM):
            vdc_logined = vca.login_to_org(service, org_name)
            if vdc_logined is False:
                ctx.logger.info("Login to VDC failed. Retrying...")
                time.sleep(RELOGIN_TIMEOUT)
                continue
            else:
                ctx.logger.info("Login to VDC successful.")
                break
        # we can login to system,
        # but have some troubles with login to organization,
        # lets retry later
        if vdc_logined is False:
            raise cfy_exc.RecoverableError(message="Could not login to VDC",
                                           retry_after=RELOGIN_TIMEOUT)
        # Make sure the session is closed when the process exits.
        atexit.register(vca.logout)
        return vca
    def _ondemand_login(self, url, username, password, token, instance_id,
                        session_token=None, org_url=None):
        """
        login to ondemand service
        """
        def get_instance(vca, instance_id):
            # Find the instance record matching instance_id, or None.
            instances = vca.get_instances() or []
            for instance in instances:
                if instance['id'] == instance_id:
                    return instance
        version = '5.7'
        if instance_id is None:
            raise cfy_exc.NonRecoverableError(
                "Instance ID should be specified for OnDemand login")
        logined = False
        instance_logined = False
        vca = vcloudair.VCA(
            url, username, service_type=ONDEMAND_SERVICE_TYPE, version=version)
        if session_token:
            if session_login(vca, org_url, session_token, version):
                return vca
            else:
                raise cfy_exc.NonRecoverableError("Invalid session credentials")
        # login with token
        if token:
            for _ in range(LOGIN_RETRY_NUM):
                logined = vca.login(token=token)
                if logined is False:
                    ctx.logger.info("Login using token failed.")
                    time.sleep(RELOGIN_TIMEOUT)
                    continue
                else:
                    ctx.logger.info("Login using token successful.")
                    break
        # outdated token, try login by password
        if logined is False and password:
            for _ in range(LOGIN_RETRY_NUM):
                logined = vca.login(password)
                if logined is False:
                    ctx.logger.info("Login using password failed. Retrying...")
                    time.sleep(RELOGIN_TIMEOUT)
                    continue
                else:
                    ctx.logger.info("Login using password successful.")
                    break
        # can't login to system at all
        if logined is False:
            raise cfy_exc.NonRecoverableError("Invalid login credentials")
        instance = get_instance(vca, instance_id)
        if instance is None:
            raise cfy_exc.NonRecoverableError(
                "Instance {0} could not be found.".format(instance_id))
        # NOTE(review): the instance login is performed twice -- first
        # with the caller's password/token, then again with the session
        # token/org_url obtained from the first login.  Presumably the
        # second pass refreshes the session bound to the org URL; confirm
        # whether the first loop is still required.
        for _ in range(LOGIN_RETRY_NUM):
            instance_logined = vca.login_to_instance(
                instance_id, password, token, None)
            if instance_logined is False:
                ctx.logger.info("Login to instance failed. Retrying...")
                time.sleep(RELOGIN_TIMEOUT)
                continue
            else:
                ctx.logger.info("Login to instance successful.")
                break
        for _ in range(LOGIN_RETRY_NUM):
            instance_logined = vca.login_to_instance(
                instance_id,
                None,
                vca.vcloud_session.token,
                vca.vcloud_session.org_url)
            if instance_logined is False:
                ctx.logger.info("Login to instance failed. Retrying...")
                time.sleep(RELOGIN_TIMEOUT)
                continue
            else:
                ctx.logger.info("Login to instance successful.")
                break
        # we can login to system,
        # but have some troubles with login to instance,
        # lets retry later
        if instance_logined is False:
            raise cfy_exc.RecoverableError(
                message="Could not login to instance",
                retry_after=RELOGIN_TIMEOUT)
        atexit.register(vca.logout)
        return vca
    def _private_login(self, url, username, password, token, org_name,
                       org_url=None, api_version='5.6'):
        """
        login to private instance
        """
        logined = False
        vca = vcloudair.VCA(
            host=url,
            username=username,
            service_type=PRIVATE_SERVICE_TYPE,
            version=api_version)
        if logined is False and password:
            for _ in range(LOGIN_RETRY_NUM):
                logined = vca.login(password, org=org_name)
                if logined is False:
                    ctx.logger.info("Login using password failed. Retrying...")
                    time.sleep(RELOGIN_TIMEOUT)
                    continue
                else:
                    token = vca.token
                    # Set org_url based on the session, no matter what was
                    # passed in to the application, as this is guaranteed to
                    # be correct
                    org_url = vca.vcloud_session.org_url
                    ctx.logger.info("Login using password successful.")
                    break
        # Private mode requires being logged in with a token otherwise you
        # don't seem to be able to retrieve any VDCs
        if token:
            for _ in range(LOGIN_RETRY_NUM):
                logined = vca.login(token=token, org_url=org_url)
                if logined is False:
                    ctx.logger.info("Login using token failed.")
                    time.sleep(RELOGIN_TIMEOUT)
                    continue
                else:
                    ctx.logger.info("Login using token successful.")
                    break
        if logined is False:
            raise cfy_exc.NonRecoverableError("Invalid login credentials")
        atexit.register(vca.logout)
        return vca
def with_vca_client(f):
    """
    Decorator: resolve the vcloud config for the current cloudify ctx,
    open a vCloud Air client and pass it to ``f`` as the ``vca_client``
    keyword argument.
    """
    @wraps(f)
    def wrapper(*args, **kw):
        # Pick the config and runtime properties matching the context kind.
        if ctx.type == context.NODE_INSTANCE:
            config = ctx.node.properties.get(VCLOUD_CONFIG)
            prop = ctx.instance.runtime_properties
        elif ctx.type == context.RELATIONSHIP_INSTANCE:
            config = ctx.source.node.properties.get(VCLOUD_CONFIG)
            prop = ctx.source.instance.runtime_properties
        else:
            raise cfy_exc.NonRecoverableError("Unsupported context")
        # Reuse a previously stored session where available.
        if config and prop:
            config[SESSION_TOKEN] = prop.get(SESSION_TOKEN)
            config[ORG_URL] = prop.get(ORG_URL)
        kw['vca_client'] = VcloudAirClient().get(config=config)
        return f(*args, **kw)
    return wrapper
def wait_for_task(vca_client, task):
    """
    Poll a vCloud task until it reaches a terminal state.

    Raises NonRecoverableError when the task reports an error, or when it
    is still running after WAIT_TIME_MAX_MINUTES.
    """
    WAIT_TIME_MAX_MINUTES = 30
    # Floor division keeps MAX_ATTEMPTS an int; plain '/' would produce a
    # float under Python 3 and break range().
    MAX_ATTEMPTS = WAIT_TIME_MAX_MINUTES * 60 // TASK_RECHECK_TIMEOUT
    ctx.logger.debug('Maximum task wait time {0} minutes.'
                     .format(WAIT_TIME_MAX_MINUTES))
    ctx.logger.debug('Task recheck after {0} seconds.'
                     .format(TASK_RECHECK_TIMEOUT))
    status = task.get_status()
    for attempt in range(MAX_ATTEMPTS):
        ctx.logger.debug('Attempt: {0}/{1}.'.format(attempt + 1, MAX_ATTEMPTS))
        if status == TASK_STATUS_SUCCESS:
            ctx.logger.debug('Task completed in {0} seconds'
                             .format(attempt * TASK_RECHECK_TIMEOUT))
            return
        if status == TASK_STATUS_ERROR:
            error = task.get_Error()
            raise cfy_exc.NonRecoverableError(
                "Error during task execution: {0}".format(error.get_message()))
        time.sleep(TASK_RECHECK_TIMEOUT)
        # Refresh the task document from the API.
        response = requests.get(
            task.get_href(),
            headers=vca_client.vcloud_session.get_vcloud_headers())
        task = taskType.parseString(response.content, True)
        status = task.get_status()
    # BUG FIX: the loop refreshes the status after its last check, so the
    # final poll result was previously discarded and a task finishing on
    # the last attempt was reported as a timeout.  Evaluate it here.
    if status == TASK_STATUS_SUCCESS:
        ctx.logger.debug('Task completed in {0} seconds'
                         .format(MAX_ATTEMPTS * TASK_RECHECK_TIMEOUT))
        return
    if status == TASK_STATUS_ERROR:
        error = task.get_Error()
        raise cfy_exc.NonRecoverableError(
            "Error during task execution: {0}".format(error.get_message()))
    raise cfy_exc.NonRecoverableError("Wait for task timeout.")
def get_vcloud_config():
    """
    Build the effective vcloud configuration: the static file-based
    config, overridden by the current node's 'vcloud_config' property.
    """
    if ctx.type == context.NODE_INSTANCE:
        node_config = ctx.node.properties.get(VCLOUD_CONFIG)
    elif ctx.type == context.RELATIONSHIP_INSTANCE:
        node_config = ctx.source.node.properties.get(VCLOUD_CONFIG)
    else:
        raise cfy_exc.NonRecoverableError("Unsupported context")
    merged = Config().get()
    if node_config:
        merged.update(node_config)
    return merged
def get_mandatory(obj, parameter):
    """
    Return ``obj[parameter]``, raising NonRecoverableError when it is
    absent.
    NOTE(review): any falsy value (0, '', False, empty list) is also
    treated as absent -- confirm no caller relies on such values.
    """
    value = obj.get(parameter)
    if not value:
        raise cfy_exc.NonRecoverableError(
            "Mandatory parameter {0} is absent".format(parameter))
    return value
def is_subscription(service_type):
    """
    True when ``service_type`` is empty/None or names the subscription
    service.
    """
    if not service_type:
        return True
    return service_type == SUBSCRIPTION_SERVICE_TYPE
def is_ondemand(service_type):
    """True when ``service_type`` names the ondemand service."""
    return ONDEMAND_SERVICE_TYPE == service_type
def error_response(obj):
    """
    Extract the HTTP body from an object carrying a ``response``
    attribute (e.g. a requests exception); '' when unavailable.
    """
    response = getattr(obj, 'response', None)
    return getattr(response, 'content', '')
def session_login(vca, org_url, session_token, version):
    """
    Attach a VCS session restored from ``session_token`` to ``vca``.
    Retries up to LOGIN_RETRY_NUM times; returns True on success.
    """
    vcs = vcloudair.VCS(org_url, None, None, None, org_url, org_url, version)
    for _attempt in range(LOGIN_RETRY_NUM):
        if vcs.login(token=session_token):
            vca.vcloud_session = vcs
            ctx.logger.info("Login using session token successful.")
            return True
        ctx.logger.info("Login using session token failed.")
        time.sleep(RELOGIN_TIMEOUT)
    return False
| denismakogon/tosca-vcloud-plugin | vcloud_plugin_common/__init__.py | Python | apache-2.0 | 17,925 |
# -*- coding: utf-8 -*-
"""
Installs and configures amqp
"""
import logging
import uuid
import os
from packstack.installer import validators
from packstack.installer import processors
from packstack.installer import basedefs
from packstack.installer import utils
from packstack.modules.common import filtered_hosts
from packstack.modules.ospluginutils import (getManifestTemplate,
appendManifestFile)
#------------------ AMQP installer initialization ------------------
PLUGIN_NAME = "AMQP"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
    """
    Register the AMQP parameter groups with the installer controller.

    Three groups are added:
      - AMQP: backend selection, host, SSL/auth toggles (always asked)
      - AMQPSSL: certificate details (only when SSL is enabled)
      - AMQPAUTH: credentials (only when authentication is enabled)
    """
    # Base AMQP parameters, always prompted.
    params = [
        {"CMD_OPTION": "amqp-backend",
         "USAGE": ("Set the AMQP service backend. Allowed values are: "
                   "qpid, rabbitmq"),
         "PROMPT": "Set the AMQP service backend",
         "OPTION_LIST": ["qpid", "rabbitmq"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "rabbitmq",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_BACKEND",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False,
         "DEPRECATES": ['CONFIG_AMQP_SERVER']},

        {"CMD_OPTION": "amqp-host",
         "USAGE": ("The IP address of the server on which to install the "
                   "AMQP service"),
         "PROMPT": "Enter the IP address of the AMQP service",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_ssh],
         "DEFAULT_VALUE": utils.get_localhost_ip(),
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_HOST",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-enable-ssl",
         "USAGE": "Enable SSL for the AMQP service",
         "PROMPT": "Enable SSL for the AMQP service?",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "n",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_ENABLE_SSL",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-enable-auth",
         "USAGE": "Enable Authentication for the AMQP service",
         "PROMPT": "Enable Authentication for the AMQP service?",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "n",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_ENABLE_AUTH",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "AMQP",
             "DESCRIPTION": "AMQP Config parameters",
             "PRE_CONDITION": False,
             "PRE_CONDITION_MATCH": True,
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)

    # SSL parameters, asked only when CONFIG_AMQP_ENABLE_SSL == 'y'.
    params = [
        {"CMD_OPTION": "amqp-nss-certdb-pw",
         "USAGE": ("The password for the NSS certificate database of the AMQP "
                   "service"),
         "PROMPT": "Enter the password for NSS certificate database",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "PROCESSORS": [processors.process_password],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_NSS_CERTDB_PW",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-ssl-port",
         "USAGE": ("The port in which the AMQP service listens to SSL "
                   "connections"),
         "PROMPT": "Enter the SSL port for the AMQP service",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "5671",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_SSL_PORT",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-ssl-cert-file",
         "USAGE": ("The filename of the certificate that the AMQP service "
                   "is going to use"),
         "PROMPT": ("Enter the filename of the SSL certificate for the AMQP "
                    "service"),
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "/etc/pki/tls/certs/amqp_selfcert.pem",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_SSL_CERT_FILE",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-ssl-key-file",
         "USAGE": ("The filename of the private key that the AMQP service "
                   "is going to use"),
         "PROMPT": "Enter the private key filename",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "/etc/pki/tls/private/amqp_selfkey.pem",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_SSL_KEY_FILE",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-ssl-self-signed",
         "USAGE": "Auto Generates self signed SSL certificate and key",
         "PROMPT": "Generate Self Signed SSL Certificate",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "y",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_SSL_SELF_SIGNED",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "AMQPSSL",
             "DESCRIPTION": "AMQP Config SSL parameters",
             "PRE_CONDITION": "CONFIG_AMQP_ENABLE_SSL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)

    # Authentication parameters, asked only when
    # CONFIG_AMQP_ENABLE_AUTH == 'y'.
    params = [
        {"CMD_OPTION": "amqp-auth-user",
         "USAGE": "User for amqp authentication",
         "PROMPT": "Enter the user for amqp authentication",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "amqp_user",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_AUTH_USER",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-auth-password",
         "USAGE": "Password for user authentication",
         "PROMPT": "Enter the password for user authentication",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_not_empty],
         "PROCESSORS": [processors.process_password],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_AUTH_PASSWORD",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},
    ]
    group = {"GROUP_NAME": "AMQPAUTH",
             "DESCRIPTION": "AMQP Config Athentication parameters",
             "PRE_CONDITION": "CONFIG_AMQP_ENABLE_AUTH",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)
def initSequences(controller):
    """Register the AMQP installation sequence with the packstack controller."""
    steps = [
        {
            'title': 'Adding AMQP manifest entries',
            'functions': [create_manifest],
        },
    ]
    controller.addSequence("Installing AMQP", [], [], steps)
#-------------------------- step functions --------------------------
def create_manifest(config, messages):
    """Build the Puppet manifest that installs and configures the AMQP broker.

    Mutates ``config`` in place: normalises the y/n SSL and auth answers
    into the literal values the Puppet templates expect, then writes one
    manifest for the AMQP host plus firewall rules allowing every
    deployment host to reach it.
    """
    server = utils.ScriptRunner(config['CONFIG_AMQP_HOST'])
    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
        # Translate the interactive 'y' into the strings used by amqp.pp
        # and point clients at the SSL port.
        config['CONFIG_AMQP_ENABLE_SSL'] = 'true'
        config['CONFIG_AMQP_PROTOCOL'] = 'ssl'
        config['CONFIG_AMQP_CLIENTS_PORT'] = "5671"
        if config['CONFIG_AMQP_SSL_SELF_SIGNED'] == 'y':
            # Generate a 3-year (1095-day) self-signed cert/key pair
            # directly on the AMQP host.
            server.append(
                "openssl req -batch -new -x509 -nodes -keyout %s "
                "-out %s -days 1095"
                % (config['CONFIG_AMQP_SSL_KEY_FILE'],
                   config['CONFIG_AMQP_SSL_CERT_FILE'])
            )
            server.execute()
    else:
        # Set default values
        config['CONFIG_AMQP_CLIENTS_PORT'] = "5672"
        config['CONFIG_AMQP_SSL_PORT'] = "5671"
        config['CONFIG_AMQP_SSL_CERT_FILE'] = ""
        config['CONFIG_AMQP_SSL_KEY_FILE'] = ""
        config['CONFIG_AMQP_NSS_CERTDB_PW'] = ""
        config['CONFIG_AMQP_ENABLE_SSL'] = 'false'
        config['CONFIG_AMQP_PROTOCOL'] = 'tcp'
    if config['CONFIG_AMQP_ENABLE_AUTH'] == 'n':
        # Auth disabled: fall back to the broker's default credentials.
        config['CONFIG_AMQP_AUTH_PASSWORD'] = 'guest'
        config['CONFIG_AMQP_AUTH_USER'] = 'guest'
    manifestfile = "%s_amqp.pp" % config['CONFIG_AMQP_HOST']
    manifestdata = getManifestTemplate('amqp.pp')
    # All hosts should be able to talk to amqp
    config['FIREWALL_SERVICE_NAME'] = "amqp"
    config['FIREWALL_PORTS'] = "['5671', '5672']"
    config['FIREWALL_CHAIN'] = "INPUT"
    config['FIREWALL_PROTOCOL'] = 'tcp'
    for host in filtered_hosts(config, exclude=False):
        # One firewall rule per source host; the template reads the
        # FIREWALL_* keys set just above and in this loop.
        config['FIREWALL_ALLOWED'] = "'%s'" % host
        config['FIREWALL_SERVICE_ID'] = "amqp_%s" % host
        manifestdata += getManifestTemplate("firewall.pp")
    appendManifestFile(manifestfile, manifestdata, 'pre')
| fr34k8/packstack | packstack/plugins/amqp_002.py | Python | apache-2.0 | 9,717 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-07-16 10:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds teacherName to account_teacher,
    # classCode/username to classinfo, and makes account_teacher.username
    # unique.

    dependencies = [
        ('testCon', '0010_auto_20160713_1534'),
    ]
    operations = [
        migrations.AddField(
            model_name='account_teacher',
            name='teacherName',
            # One-off default used only to back-fill existing rows.
            field=models.CharField(default='TeacherName', max_length=20),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='classinfo',
            name='classCode',
            field=models.CharField(default=0, max_length=10),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='classinfo',
            name='username',
            field=models.CharField(default=0, max_length=20),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='account_teacher',
            name='username',
            field=models.CharField(max_length=20, unique=True),
        ),
    ]
| Jameeeees/Mag1C_baNd | kekangpai/testCon/migrations/0011_auto_20160716_1025.py | Python | apache-2.0 | 1,103 |
import sys
import json
import flask
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.cors import origin
from parse import ProfileParser, IHMEParser
from flask import render_template
from flask import Response
from werkzeug.contrib.fixers import ProxyFix
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Value(db.Model):
    """One (country, indicator-code) health-profile value."""
    id = db.Column(db.Integer, primary_key=True)
    country = db.Column(db.String(120))
    code = db.Column(db.String(20))
    value = db.Column(db.String(120))
    def __init__(self, country, code, value):
        self.country = country
        self.code = code
        self.value = value
    def __repr__(self):
        return '<Code %s Value %r>' % (self.code, self.value)
class DALY(db.Model):
    """One ranked cause-of-disease row per country (IHME DALY data).

    ``rank`` orders causes within a country; ``perc`` is the share
    attributed to the cause; ``color`` is presumably a display hint
    consumed by the front end — confirm against the chart code.
    """
    id = db.Column(db.Integer, primary_key=True)
    country = db.Column(db.String(120))
    code = db.Column(db.String(20))
    rank = db.Column(db.Integer)
    cause = db.Column(db.String(30))
    perc = db.Column(db.Float)
    color = db.Column(db.String(10))
    def __init__(self, country, code, rank, cause, perc, color):
        self.country = country
        self.code = code
        self.rank = rank
        self.cause = cause
        self.perc = perc
        self.color = color
    def __repr__(self):
        return '<Code %s>' % (self.code)
def populate_database(fn_profile, fn_daly):
    """Parse the profile and DALY input files and load them into the DB.

    ``fn_profile``/``fn_daly`` are paths handed to ProfileParser and
    IHMEParser respectively. Commits once per parsed country (profiles)
    and once per parsed row (DALY).
    """
    parser = ProfileParser(fn_profile)
    for country, data in parser.parse_profiles():
        for code, value in data.items():
            v = Value(country, code, value)
            db.session.add(v)
        db.session.commit()
    parser = IHMEParser(fn_daly)
    for data in parser.parse_data():
        d = DALY(data["country"], data["code"], data["rank"], data["cause"], data["perc"], data["color"])
        db.session.add(d)
        db.session.commit()
@app.route("/api/profiles", methods=["GET", "POST"])
@origin('*')
def api_profiles():
    """Return all indicator values for one country as {code: value} JSON."""
    country = flask.request.values["country"]
    # Rows with an empty/falsy code are skipped.
    values = dict([
        (v.code, v.value) for v in Value.query.filter_by(country=country) if v.code
    ])
    return Response(
        response=json.dumps(values, indent=4), status=200, mimetype="application/json"
    )
@app.route("/api/daly", methods=["GET", "POST"])
@origin('*')
def api_daly():
    """Return a country's top-10 DALY causes (by rank) as a JSON list."""
    country = flask.request.values["country"]
    # Ordered by rank; rows with a falsy code or rank > 10 are dropped.
    values = [
        {
            "rank" : v.rank, "text" : v.cause,
            "value" : v.perc, "color" : v.color
        } for v in DALY.query.filter_by(country=country).order_by('rank') if v.code and v.rank <= 10
    ]
    return Response(
        response=json.dumps(values, indent=4), status=200, mimetype="application/json"
    )
# Honor X-Forwarded-* headers when running behind a reverse proxy.
app.wsgi_app = ProxyFix(app.wsgi_app)
# Usage: ``python server.py populate <profiles-file> <daly-file>`` loads the
# database; any other invocation starts the Flask development server.
if __name__ == "__main__":
    if len(sys.argv) == 4 and sys.argv[1] == "populate":
        populate_database(sys.argv[2], sys.argv[3])
    else:
        app.run(debug=True)
| adieyal/ghwa_profiles | server/server.py | Python | apache-2.0 | 2,916 |
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# ==========================================================================
# Copyright (C) 2016 Dr. Alejandro Pina Ortega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================
"""
Base class for synchronous motors.
"""
# ==========================================================================
# Program: synchronous.py
# Author: ajpina
# Date: 12/23/16
# Version: 0.1.1
#
# Revision History:
# Date Version Author Description
# - 12/23/16: 0.1.1 ajpina Defines mandatory methods and properties
#
# ==========================================================================
__author__ = 'ajpina'
from abc import abstractmethod
from uffema.machines import RotatingMachine
class Synchronous(RotatingMachine):
    """Abstract base class for synchronous machines.

    Declares the mandatory properties (stator, rotor, flux, mode, type)
    as abstract; concrete machine classes must override them. The stub
    bodies are never meant to execute.
    """
    @property
    @abstractmethod
    def stator(self):
        return 'Should never see this'
    @stator.setter
    @abstractmethod
    def stator(self, new_stator):
        return
    @property
    @abstractmethod
    def rotor(self):
        return 'Should never see this'
    @rotor.setter
    @abstractmethod
    def rotor(self, new_rotor):
        return
    @property
    @abstractmethod
    def flux(self):
        return 'Should never see this'
    @flux.setter
    @abstractmethod
    def flux(self, new_flux):
        return
    @property
    @abstractmethod
    def mode(self):
        return 'Should never see this'
    @mode.setter
    @abstractmethod
    def mode(self, new_mode):
        return
    @property
    @abstractmethod
    def type(self):
        return 'Should never see this'
    @type.setter
    @abstractmethod
    def type(self, new_type):
        return
    def get_machine_type(self):
        # Human-readable machine family name.
        return 'Synchronous'
    def __init__(self, machine_settings, machine_type):
        RotatingMachine.__init__(self, machine_settings, machine_type)
        # Append this level to the hierarchical type string built up by
        # the base classes (e.g. "RotatingMachine::Synchronous::").
        self.type = self.type + 'Synchronous::'
| ajpina/uffema | uffema/machines/synchronous.py | Python | apache-2.0 | 2,520 |
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_utils import uuidutils
from sahara.service.edp.data_sources.base import DataSourceType
import testtools
class DataSourceBaseTestCase(testtools.TestCase):
    """Tests for DataSourceType URL construction and retrieval helpers."""

    def setUp(self):
        super(DataSourceBaseTestCase, self).setUp()
        self.ds_base = DataSourceType()

    def test_construct_url_no_placeholders(self):
        """A URL without placeholders passes through unchanged."""
        base_url = "swift://container/input"
        job_exec_id = uuidutils.generate_uuid()
        url = self.ds_base.construct_url(base_url, job_exec_id)
        self.assertEqual(base_url, url)

    def test_construct_url_job_exec_id_placeholder(self):
        """%JOB_EXEC_ID% is substituted with the job execution id."""
        base_url = "swift://container/input.%JOB_EXEC_ID%.out"
        job_exec_id = uuidutils.generate_uuid()
        url = self.ds_base.construct_url(base_url, job_exec_id)
        self.assertEqual(
            "swift://container/input." + job_exec_id + ".out", url)

    def test_construct_url_randstr_placeholder(self):
        """%RANDSTR(n)% expands to n random lowercase letters."""
        base_url = "swift://container/input.%RANDSTR(4)%.%RANDSTR(7)%.out"
        job_exec_id = uuidutils.generate_uuid()
        url = self.ds_base.construct_url(base_url, job_exec_id)
        # Raw strings: ``\.`` in a non-raw literal is an invalid escape
        # (DeprecationWarning; SyntaxWarning from Python 3.12).
        self.assertRegex(
            url, r"swift://container/input\.[a-z]{4}\.[a-z]{7}\.out")

    def test_construct_url_randstr_and_job_exec_id_placeholder(self):
        """Both placeholder kinds may appear in the same URL."""
        base_url = "swift://container/input.%JOB_EXEC_ID%.%RANDSTR(7)%.out"
        job_exec_id = uuidutils.generate_uuid()
        url = self.ds_base.construct_url(base_url, job_exec_id)
        self.assertRegex(
            url,
            "swift://container/input." + job_exec_id + r"\.[a-z]{7}\.out")

    def test_get_urls(self):
        """Default get_urls returns the same URL for runtime and data."""
        url = 'test://url'
        cluster = mock.Mock()
        job_exec_id = 'test_id'
        self.assertEqual((url, url), self.ds_base.get_urls(url,
                         cluster, job_exec_id))
| openstack/sahara | sahara/tests/unit/service/edp/data_sources/base_test.py | Python | apache-2.0 | 2,415 |
# Diagonal difference: read an n x n matrix from stdin (first line n, then
# one whitespace-separated row per line) and print the absolute difference
# between the sums of its two diagonals.
#
# Parsing via int() over split() replaces the previous
# ast.literal_eval(','.join(...)) trick, which crashed with TypeError for
# n == 1 (literal_eval of a single token yields an int, not a tuple).
n = int(input())
primary_sum = 0
secondary_sum = 0
for i in range(n):
    row = [int(tok) for tok in input().split()]
    # Ignore malformed rows, matching the original length guard.
    if len(row) == n:
        primary_sum += row[i]
        secondary_sum += row[n - 1 - i]
print(abs(primary_sum - secondary_sum))
| tpb261/hackerRank | warmup/diag_diff.py | Python | apache-2.0 | 198 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' api module '''
from .metrics import (
MetricsHandler,
MetricsTimelineHandler
)
from .topology import (
TopologyExceptionSummaryHandler,
ListTopologiesJsonHandler,
TopologyLogicalPlanJsonHandler,
TopologyPackingPlanJsonHandler,
TopologyPhysicalPlanJsonHandler,
TopologySchedulerLocationJsonHandler,
TopologyExecutionStateJsonHandler,
TopologyExceptionsJsonHandler,
PidHandler,
JstackHandler,
MemoryHistogramHandler,
JmapHandler
)
| huijunwu/heron | heron/tools/ui/src/python/handlers/api/__init__.py | Python | apache-2.0 | 1,275 |
# Copyright 2015-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import logging
from os import path
LOG = logging.getLogger(__name__)
MAX_PACKAGE_SIZE = 50000000
class PackageUploader(object):
    """Creates or updates an AWS Lambda function from a built package.

    TODO: Should we decouple the config from the Object Init?
    """

    def __init__(self, config, profile_name):
        """Build the boto3 session/clients from *config* and a profile name."""
        self._config = config
        self._vpc_config = self._format_vpc_config()
        self._aws_session = boto3.session.Session(region_name=config.region,
                                                  profile_name=profile_name)
        self._lambda_client = self._aws_session.client('lambda')
        # Set by upload(); alias() refuses to run until it is populated.
        self.version = None

    def upload_existing(self, pkg):
        """Upload a package for an existing function and update its config.

        Returns the package version (the published one when
        ``config.publish`` is set).
        """
        environment = {'Variables': self._config.variables}
        LOG.debug('running update_function_code')
        if self._config.s3_bucket:
            self._upload_s3(pkg.zip_file)
            conf_update_resp = self._lambda_client.update_function_code(
                FunctionName=self._config.name,
                S3Bucket=self._config.s3_bucket,
                S3Key=self._config.s3_package_name(),
                Publish=False,
            )
        else:
            # Only read the zip into memory when uploading it inline.
            self._validate_package_size(pkg.zip_file)
            with open(pkg.zip_file, "rb") as fil:
                zip_file = fil.read()
            conf_update_resp = self._lambda_client.update_function_code(
                FunctionName=self._config.name,
                ZipFile=zip_file,
                Publish=False,
            )
        LOG.debug("AWS update_function_code response: %s"
                  % conf_update_resp)

        LOG.debug('running update_function_configuration')
        response = self._lambda_client.update_function_configuration(
            FunctionName=self._config.name,
            Handler=self._config.handler,
            Role=self._config.role,
            Description=self._config.description,
            Timeout=self._config.timeout,
            MemorySize=self._config.memory,
            VpcConfig=self._vpc_config,
            Environment=environment,
            TracingConfig=self._config.tracing,
            Runtime=self._config.runtime,
        )
        LOG.debug("AWS update_function_configuration response: %s"
                  % response)

        version = response.get('Version')
        # Publish the version after upload and config update if needed
        if self._config.publish:
            resp = self._lambda_client.publish_version(
                FunctionName=self._config.name,
            )
            LOG.debug("AWS publish_version response: %s" % resp)
            version = resp.get('Version')
        return version

    def upload_new(self, pkg):
        """Create a brand-new lambda function from the package.

        Returns the package version.
        """
        environment = {'Variables': self._config.variables}
        if self._config.s3_bucket:
            code = {'S3Bucket': self._config.s3_bucket,
                    'S3Key': self._config.s3_package_name()}
            self._upload_s3(pkg.zip_file)
        else:
            self._validate_package_size(pkg.zip_file)
            with open(pkg.zip_file, "rb") as fil:
                zip_file = fil.read()
            code = {'ZipFile': zip_file}
        LOG.debug('running create_function_code')
        response = self._lambda_client.create_function(
            FunctionName=self._config.name,
            Runtime=self._config.runtime,
            Handler=self._config.handler,
            Role=self._config.role,
            Code=code,
            Description=self._config.description,
            Timeout=self._config.timeout,
            MemorySize=self._config.memory,
            Publish=self._config.publish,
            VpcConfig=self._vpc_config,
            Environment=environment,
            TracingConfig=self._config.tracing,
        )
        LOG.debug("AWS create_function response: %s" % response)
        return response.get('Version')

    def upload(self, pkg):
        """Create or update the function, depending on whether it exists."""
        existing_function = True
        try:
            get_resp = self._lambda_client.get_function_configuration(
                FunctionName=self._config.name)
            LOG.debug("AWS get_function_configuration response: %s" % get_resp)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed. Any client error is
            # treated as "function does not exist yet".
            existing_function = False
            LOG.debug("function not found creating new function")

        if existing_function:
            self.version = self.upload_existing(pkg)
        else:
            self.version = self.upload_new(pkg)

    def alias(self):
        """Create/update an alias pointing at the uploaded version.

        Raises an exception if the package has not been uploaded yet.
        """
        if self.version is None:
            raise Exception('Must upload package before applying alias')

        if self._alias_exists():
            self._update_alias()
        else:
            self._create_alias()

    def _alias_exists(self):
        """Return True if the configured alias already exists."""
        resp = self._lambda_client.list_aliases(
            FunctionName=self._config.name)
        for alias in resp.get('Aliases'):
            if alias.get('Name') == self._config.alias:
                return True
        return False

    def _create_alias(self):
        """Create the configured alias at the uploaded version."""
        LOG.debug("Creating new alias %s" % self._config.alias)
        resp = self._lambda_client.create_alias(
            FunctionName=self._config.name,
            Name=self._config.alias,
            FunctionVersion=self.version,
            Description=self._config.alias_description,
        )
        LOG.debug("AWS create_alias response: %s" % resp)

    def _update_alias(self):
        """Repoint the existing alias at the uploaded version."""
        LOG.debug("Updating alias %s" % self._config.alias)
        resp = self._lambda_client.update_alias(
            FunctionName=self._config.name,
            Name=self._config.alias,
            FunctionVersion=self.version,
            Description=self._config.alias_description,
        )
        LOG.debug("AWS update_alias response: %s" % resp)

    def _validate_package_size(self, pkg):
        '''
        Logs a warning if the package size is over the current max package size
        '''
        if path.getsize(pkg) > MAX_PACKAGE_SIZE:
            LOG.warning("Size of your deployment package is larger than 50MB!")

    def _format_vpc_config(self):
        '''
        Returns {} if the VPC config is set to None by Config,
        returns the formatted config otherwise
        '''
        if self._config.raw['vpc']:
            return {
                'SubnetIds': self._config.raw['vpc']['subnets'],
                'SecurityGroupIds': self._config.raw['vpc']['security_groups']
            }
        else:
            # Empty lists detach the function from any VPC.
            return {
                'SubnetIds': [],
                'SecurityGroupIds': [],
            }

    def _upload_s3(self, zip_file):
        '''
        Uploads the lambda package to s3
        '''
        s3_client = self._aws_session.client('s3')
        transfer = boto3.s3.transfer.S3Transfer(s3_client)
        transfer.upload_file(zip_file, self._config.s3_bucket,
                             self._config.s3_package_name())
| rackerlabs/lambda-uploader | lambda_uploader/uploader.py | Python | apache-2.0 | 8,157 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import os
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
import paddle
paddle.enable_static()
class TestCCommInitOp(unittest.TestCase):
    """Exercises c_gen_cncl_id + c_comm_init on an MLU device.

    Relies on the distributed launcher exporting
    PADDLE_TRAINER_ENDPOINTS, PADDLE_CURRENT_ENDPOINT and
    FLAGS_selected_mlus before the test runs.
    """
    def setUp(self):
        self.endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS").split(',')
        self.current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
        self.nranks = len(self.endpoints)
        self.rank = self.endpoints.index(self.current_endpoint)
        self.mlu_id = int(os.getenv("FLAGS_selected_mlus"))
        self.place = fluid.MLUPlace(self.mlu_id)
        self.exe = fluid.Executor(self.place)
        self.endpoints.remove(self.current_endpoint)
        self.other_endpoints = self.endpoints
        # Rank 0 blocks until every peer endpoint is listening, so the
        # communicator handshake below cannot race the peers' startup.
        if self.rank == 0:
            wait_server_ready(self.other_endpoints)
    def test_specifying_devices(self):
        # Build a minimal program: generate a CNCL unique id, then
        # initialise the communicator ring on the selected MLU device.
        program = fluid.Program()
        block = program.global_block()
        cncl_id_var = block.create_var(
            name=fluid.unique_name.generate('cncl_id'),
            persistable=True,
            type=fluid.core.VarDesc.VarType.RAW)
        block.append_op(
            type='c_gen_cncl_id',
            inputs={},
            outputs={'Out': cncl_id_var},
            attrs={
                'rank': self.rank,
                'endpoint': self.current_endpoint,
                'other_endpoints': self.other_endpoints
            })
        block.append_op(
            type='c_comm_init',
            inputs={'X': cncl_id_var},
            outputs={},
            attrs={
                'nranks': self.nranks,
                'rank': self.rank,
                'ring_id': 0,
                'device_id': self.mlu_id
            })
        # Passes if the program executes without raising.
        self.exe.run(program)
# Allow running this file directly as a standalone test module.
if __name__ == "__main__":
    unittest.main()
| PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/mlu/c_comm_init_op_mlu.py | Python | apache-2.0 | 2,475 |
import numpy as np
from src.base.animal import Animal
class Stats(object):
    """Per-need stat vector backed by a one-record numpy structured array.

    Values assigned through ``__setitem__`` are clamped into the
    [min, max] range declared in ``DATA``.
    """
    # TODO: Re-implement this at a higher level
    # name -> (numpy dtype code, min, max) ... distro?
    DATA = {
        'air': ('i2', 0, 1000),
        'hunger': ('i2', 0, 1000),
        'thirst': ('i2', 0, 1000),
        'stamina': ('i2', 0, 1000),
        'warmth': ('i2', 0, 1000) }

    # One zeroed record with a field per stat name (dtype code only).
    VECTOR_TEMPLATE = np.zeros(1, dtype=[(k, v[0]) for k, v in DATA.items()])

    def __init__(self):
        # .copy() is required: a bare ``[:]`` slice of a numpy array is a
        # *view*, which made every Stats instance share the same storage.
        self.data = Stats.VECTOR_TEMPLATE.copy()

    def __getitem__(self, name):
        return self.data[name]

    def __setitem__(self, name, value):
        # Clamp into the declared [min, max]. The original second branch
        # tested ``value < max`` and so forced every in-range value up to
        # the maximum; it must clamp only values *above* the maximum.
        _, minimum, maximum = Stats.DATA[name]
        if value < minimum:
            value = minimum
        elif value > maximum:
            value = maximum
        self.data[name] = value
class Person(Animal):
    """A human agent modelled on the hierarchy of needs:

    Air, water, food, shelter, clothing
    Health, safety, future security
    Friendship, intimacy, family
    Esteem, self-image
    Goals/Dreams
    """

    def __init__(self, world, name="Bob"):
        super(Person, self).__init__(world)
        # Falsy names (None, "") collapse to a placeholder.
        self.name = name or "Noname"
        self.inventory = []
| lexwraith/ThisIsBob | src/game/person.py | Python | apache-2.0 | 1,247 |
# ----------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Edson de Lima Barros 1715310043
# Gabriel Nascimento de Oliveira 1715310052
# Luiz Daniel Raposo Nunes de Mello 1715310049
# Renan de Almeida Campos 0825060036
# Tiago Ferreira Aranha 1715310047
# Wilbert Luís Evangelista Marins 1715310055
# Mackson Garcez Moreno de Oliveira júnior 1215090300
#
# 1.5. Faça um Programa que converta metros para centímetros.
# ----------------------------------------------------------
# Read a whole number of metres and report the equivalent in centimetres.
meters = int(input('Digite a medida (em metros): '))
centimeters = meters * 100
print('%d metros são %d centímetros' % (meters, centimeters))
| jucimarjr/IPC_2017-1 | lista02/lista02_exercicio01_questao05.py | Python | apache-2.0 | 883 |
# -*- coding: utf-8 -*-
import urllib
from . import admin
from flask import request
from flask import url_for
from flask import redirect
from flask import render_template
from flask_login import UserMixin
from flask_login import login_user
from flask_login import logout_user
from flask_login import login_required
from core.extension import login_manager
from core.views.common import render_json
from core.models import AdminUser
class LoginUser(UserMixin):
    """Flask-Login adapter wrapping an AdminUser model instance."""
    def __init__(self, user):
        self.user = user
    def get_id(self):
        # Flask-Login requires a unicode id (Python 2 codebase).
        return unicode(self.user.id)
@login_manager.user_loader
def load_user(userid):
    """Reload the logged-in admin from the id stored in the session."""
    user = AdminUser.get_by_id(int(userid))
    return LoginUser(user)
@admin.route('/signin', methods=['GET', 'POST'])
def signin():
    """Render the sign-in page (GET) or authenticate an admin (POST)."""
    if request.method == 'POST':
        # NOTE(review): the password is compared in plaintext via the DB
        # filter — consider storing/checking a hash; confirm the schema.
        user = AdminUser.query.filter_by(
            active=True,
            username=request.form['username'],
            password=request.form['pwd']
        ).first()
        if not user:
            return render_json(1, {'err_no': 'pwd_error', 'input': 'pwd'})
        login_user(LoginUser(user))
        next = request.form.get('next', '')
        if next:
            # NOTE(review): 'next' is decoded and returned unvalidated —
            # possible open redirect; verify callers restrict it.
            next = urllib.unquote(next)
            return render_json(0, {'href': next, 'delaySuccess': True})
        return render_json(0, {'href': '/admin/dashboard', 'delaySuccess': True})
    return render_template('/admin/signin.html')
@admin.route('/signout', methods=['GET'])
def signout():
    """Log the admin out and send them back to the sign-in page."""
    logout_user()
    return redirect(url_for('admin.signin'))
@admin.route('/dashboard', methods=['GET', 'POST'])
@login_required
def dashboard():
    """Render the admin dashboard (login required)."""
    return render_template('/admin/dashboard.html')
| moxuanchen/BMS | core/views/admin/login.py | Python | apache-2.0 | 1,670 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before dispatching the
    # management command (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LaasFrontEnd.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| BryceBrown/LinkstrDjango | manage.py | Python | apache-2.0 | 255 |
# Copyright 2011 OpenStack Foundation
# aLL Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
from oslo_utils import timeutils
import six
import webob
from cinder.api.v2 import types
from cinder.api.views import types as views_types
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.volume import volume_types
def stub_volume_type(id):
    """Build a fake volume-type dict with five canned extra specs."""
    specs = {"key%d" % i: "value%d" % i for i in range(1, 6)}
    return {
        'id': id,
        'name': 'vol_type_%s' % six.text_type(id),
        'description': 'vol_type_desc_%s' % six.text_type(id),
        'extra_specs': specs,
    }
def return_volume_types_get_all_types(context, search_opts=None):
    """Stub returning three canned volume types keyed by name."""
    return {'vol_type_%d' % i: stub_volume_type(i) for i in (1, 2, 3)}
def return_empty_volume_types_get_all_types(context, search_opts=None):
    """Stub simulating a deployment with no volume types at all."""
    return dict()
def return_volume_types_get_volume_type(context, id):
    """Stub get-by-id; id "777" simulates a missing volume type."""
    if id != "777":
        return stub_volume_type(id)
    raise exception.VolumeTypeNotFound(volume_type_id=id)
def return_volume_types_get_by_name(context, name):
    """Stub get-by-name; name "777" simulates a missing volume type."""
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    # Names look like "vol_type_<n>"; recover the numeric id.
    type_id = int(name.split("_")[2])
    return stub_volume_type(type_id)
def return_volume_types_get_default():
    """Stub returning volume type 1 as the configured default."""
    default_type = stub_volume_type(1)
    return default_type
def return_volume_types_get_default_not_found():
    """Stub for a deployment where no default volume type is configured."""
    return dict()
class VolumeTypesApiTest(test.TestCase):
    """REST-level tests for the v2 volume-types controller."""
    def setUp(self):
        super(VolumeTypesApiTest, self).setUp()
        self.controller = types.VolumeTypesController()
    def test_volume_types_index(self):
        """index() lists every type with its extra specs."""
        self.stubs.Set(volume_types, 'get_all_types',
                       return_volume_types_get_all_types)
        req = fakes.HTTPRequest.blank('/v2/fake/types')
        res_dict = self.controller.index(req)
        self.assertEqual(3, len(res_dict['volume_types']))
        expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
        actual_names = map(lambda e: e['name'], res_dict['volume_types'])
        self.assertEqual(set(actual_names), set(expected_names))
        for entry in res_dict['volume_types']:
            self.assertEqual('value1', entry['extra_specs']['key1'])
    def test_volume_types_index_no_data(self):
        """index() returns an empty list when no types exist."""
        self.stubs.Set(volume_types, 'get_all_types',
                       return_empty_volume_types_get_all_types)
        req = fakes.HTTPRequest.blank('/v2/fake/types')
        res_dict = self.controller.index(req)
        self.assertEqual(0, len(res_dict['volume_types']))
    def test_volume_types_show(self):
        """show() returns the requested type keyed by its id."""
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        type_id = str(uuid.uuid4())
        req = fakes.HTTPRequest.blank('/v2/fake/types/' + type_id)
        res_dict = self.controller.show(req, type_id)
        self.assertEqual(1, len(res_dict))
        self.assertEqual(type_id, res_dict['volume_type']['id'])
        type_name = 'vol_type_' + type_id
        self.assertEqual(type_name, res_dict['volume_type']['name'])
    def test_volume_types_show_not_found(self):
        """show() maps a missing type to HTTP 404."""
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        req = fakes.HTTPRequest.blank('/v2/fake/types/777')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, '777')
    def test_get_default(self):
        """show('default') returns the configured default type."""
        self.stubs.Set(volume_types, 'get_default_volume_type',
                       return_volume_types_get_default)
        req = fakes.HTTPRequest.blank('/v2/fake/types/default')
        req.method = 'GET'
        res_dict = self.controller.show(req, 'default')
        self.assertEqual(1, len(res_dict))
        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
        self.assertEqual('vol_type_desc_1',
                         res_dict['volume_type']['description'])
    def test_get_default_not_found(self):
        """show('default') maps a missing default type to HTTP 404."""
        self.stubs.Set(volume_types, 'get_default_volume_type',
                       return_volume_types_get_default_not_found)
        req = fakes.HTTPRequest.blank('/v2/fake/types/default')
        req.method = 'GET'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, 'default')
    def test_view_builder_show(self):
        """The view builder exposes only the public type fields."""
        view_builder = views_types.ViewBuilder()
        now = timeutils.isotime()
        raw_volume_type = dict(
            name='new_type',
            description='new_type_desc',
            deleted=False,
            created_at=now,
            updated_at=now,
            extra_specs={},
            deleted_at=None,
            id=42,
        )
        request = fakes.HTTPRequest.blank("/v2")
        output = view_builder.show(request, raw_volume_type)
        self.assertIn('volume_type', output)
        # Internal columns (deleted, timestamps) must not leak through.
        expected_volume_type = dict(
            name='new_type',
            description='new_type_desc',
            extra_specs={},
            id=42,
        )
        self.assertDictMatch(output['volume_type'], expected_volume_type)
    def test_view_builder_list(self):
        """index view preserves order and strips internal fields."""
        view_builder = views_types.ViewBuilder()
        now = timeutils.isotime()
        raw_volume_types = []
        for i in range(0, 10):
            raw_volume_types.append(
                dict(
                    name='new_type',
                    description='new_type_desc',
                    deleted=False,
                    created_at=now,
                    updated_at=now,
                    extra_specs={},
                    deleted_at=None,
                    id=42 + i
                )
            )
        request = fakes.HTTPRequest.blank("/v2")
        output = view_builder.index(request, raw_volume_types)
        self.assertIn('volume_types', output)
        for i in range(0, 10):
            expected_volume_type = dict(
                name='new_type',
                description='new_type_desc',
                extra_specs={},
                id=42 + i
            )
            self.assertDictMatch(output['volume_types'][i],
                                 expected_volume_type)
class VolumeTypesSerializerTest(test.TestCase):
    """XML serializer tests for single and listed volume types."""
    def _verify_volume_type(self, vtype, tree):
        # Check the element tag/attributes and that extra_specs children
        # match the source dict exactly (no extras, no omissions).
        self.assertEqual('volume_type', tree.tag)
        self.assertEqual(vtype['name'], tree.get('name'))
        self.assertEqual(vtype['description'], tree.get('description'))
        self.assertEqual(str(vtype['id']), tree.get('id'))
        self.assertEqual(1, len(tree))
        extra_specs = tree[0]
        self.assertEqual('extra_specs', extra_specs.tag)
        seen = set(vtype['extra_specs'].keys())
        for child in extra_specs:
            self.assertIn(child.tag, seen)
            self.assertEqual(vtype['extra_specs'][child.tag], child.text)
            seen.remove(child.tag)
        self.assertEqual(len(seen), 0)
    def test_index_serializer(self):
        """Serializing a list yields one <volume_type> per entry."""
        serializer = types.VolumeTypesTemplate()
        # Just getting some input data
        vtypes = return_volume_types_get_all_types(None)
        text = serializer.serialize({'volume_types': vtypes.values()})
        tree = etree.fromstring(text)
        self.assertEqual('volume_types', tree.tag)
        self.assertEqual(len(vtypes), len(tree))
        for child in tree:
            name = child.get('name')
            self.assertIn(name, vtypes)
            self._verify_volume_type(vtypes[name], child)
    def test_voltype_serializer(self):
        """Serializing a single type round-trips all its fields."""
        serializer = types.VolumeTypeTemplate()
        vtype = stub_volume_type(1)
        text = serializer.serialize(dict(volume_type=vtype))
        tree = etree.fromstring(text)
        self._verify_volume_type(vtype, tree)
| rakeshmi/cinder | cinder/tests/unit/api/v2/test_types.py | Python | apache-2.0 | 8,430 |
import pytest
import watchmaker
@pytest.fixture
def setup_object():
    """Placeholder fixture; performs no setup yet."""
    pass
def test_main():
    """Placeholder for tests"""
    # NOTE(review): tautological — compares __version__ with itself and can
    # never fail; replace with a meaningful assertion when tests are added.
    # Placeholder
    assert watchmaker.__version__ == watchmaker.__version__
| MarionTheBull/watchmaker | tests/test_watchmaker.py | Python | apache-2.0 | 209 |
'''
Created on Dec 12, 2011
@author: sean
'''
from . import Visitor
import ast
#FIXME: add tests
class CopyVisitor(Visitor):
    '''
    Copy only ast nodes and lists
    '''
    def visitDefault(self, node):
        # Recursively copy an AST node: AST-valued fields and list items
        # are copied via visit(); everything else (strings, numbers,
        # identifiers) is shared by reference.
        Node = type(node)
        new_node = Node()
        for _field in Node._fields:
            if hasattr(node, _field):
                field = getattr(node, _field)
                if isinstance(field, (list, tuple)):
                    # NOTE(review): tuple-valued fields come back as
                    # lists — confirm no caller relies on tuple identity.
                    new_list = []
                    for item in field:
                        if isinstance(item, ast.AST):
                            new_item = self.visit(item)
                        else:
                            new_item = item
                        new_list.append(new_item)
                    setattr(new_node, _field, new_list)
                elif isinstance(field, ast.AST):
                    setattr(new_node, _field, self.visit(field))
                else:
                    setattr(new_node, _field, field)
        # Carry over positional attributes (lineno, col_offset, ...).
        for _attr in node._attributes:
            if hasattr(node, _attr):
                setattr(new_node, _attr, getattr(node, _attr))
        return new_node
return new_node
def copy_node(node):
    """Return a structural copy of *node* produced by CopyVisitor."""
    visitor = CopyVisitor()
    return visitor.visit(node)
| diana-hep/femtocode | lang/femtocode/thirdparty/meta/asttools/visitors/copy_tree.py | Python | apache-2.0 | 1,255 |
# Copyright 2021 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add device reservation
Revision ID: 42c7fd6e792e
Revises: 02e2f2186d98
Create Date: 2021-06-22 15:27:00.239725
"""
# revision identifiers, used by Alembic.
revision = '42c7fd6e792e'
down_revision = '02e2f2186d98'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create the device-reservation tables and tighten affinity.

    Adds devices, device_extra_capabilities, device_allocations and
    device_reservations, then makes instance_reservations.affinity
    non-nullable.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Inventory of reservable devices (containers/VMs/shells via zun).
    op.create_table('devices',
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('name', sa.String(length=255), nullable=False),
                    sa.Column('device_type',
                              sa.Enum('container', 'vm', 'shell',
                                      name='allowed_device_types'),
                              nullable=False),
                    sa.Column('device_driver', sa.Enum(
                        'zun', name='allowed_device_drivers'), nullable=False),
                    sa.Column('reservable', sa.Boolean(),
                              server_default=sa.text('true'), nullable=False),
                    sa.PrimaryKeyConstraint('id')
                    )
    # Extra capability values attached to a device (FK to both sides).
    op.create_table('device_extra_capabilities',
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('device_id', sa.String(
                        length=36), nullable=False),
                    sa.Column('capability_id', sa.String(
                        length=255), nullable=False),
                    sa.Column('capability_value', sa.Text().with_variant(
                        mysql.MEDIUMTEXT(), 'mysql'), nullable=False),
                    sa.ForeignKeyConstraint(
                        ['capability_id'], ['extra_capabilities.id'], ),
                    sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ),
                    sa.PrimaryKeyConstraint('id')
                    )
    # Soft-deletable link between a device and a reservation.
    op.create_table('device_allocations',
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('deleted_at', sa.DateTime(), nullable=True),
                    sa.Column('deleted', sa.String(length=36), nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('device_id', sa.String(
                        length=36), nullable=True),
                    sa.Column('reservation_id', sa.String(
                        length=36), nullable=True),
                    sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ),
                    sa.ForeignKeyConstraint(['reservation_id'], [
                        'reservations.id'], ),
                    sa.PrimaryKeyConstraint('id')
                    )
    # Per-reservation device request parameters (count, filters, hook).
    op.create_table('device_reservations',
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('deleted_at', sa.DateTime(), nullable=True),
                    sa.Column('deleted', sa.String(length=36), nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('reservation_id', sa.String(
                        length=36), nullable=True),
                    sa.Column('count_range', sa.String(
                        length=36), nullable=True),
                    sa.Column('resource_properties', sa.Text().with_variant(
                        mysql.MEDIUMTEXT(), 'mysql'), nullable=True),
                    sa.Column('before_end', sa.String(
                        length=36), nullable=True),
                    sa.ForeignKeyConstraint(['reservation_id'], [
                        'reservations.id'], ),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.alter_column('instance_reservations', 'affinity',
                    existing_type=mysql.TINYINT(display_width=1),
                    nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse the upgrade: relax affinity and drop the device tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('instance_reservations', 'affinity',
                    existing_type=mysql.TINYINT(display_width=1),
                    nullable=True)
    # Tables with FKs onto devices/reservations are dropped before devices.
    op.drop_table('device_reservations')
    op.drop_table('device_allocations')
    op.drop_table('device_extra_capabilities')
    op.drop_table('devices')
    # ### end Alembic commands ###
| ChameleonCloud/blazar | blazar/db/migration/alembic_migrations/versions/42c7fd6e792e_add_device_reservation.py | Python | apache-2.0 | 5,436 |
from datetime import datetime
from rest_framework import serializers
from rest_framework.settings import api_settings
from api.models import VenueList, EventList
class VenueListSerializer(serializers.Serializer):
    """Explicit (non-Model) serializer for VenueList records.

    Every exposed column is declared by hand, with the create/update
    hooks required by a plain ``Serializer``.
    """
    id = serializers.IntegerField(read_only=True)
    venue_name = serializers.CharField(max_length=255, allow_blank=False)
    venue_url = serializers.CharField(max_length=255, allow_blank=False)
    venue_address = serializers.CharField(max_length=255, allow_blank=False)
    venue_lat_long = serializers.CharField(max_length=255, allow_blank=False)
    venue_contact = serializers.CharField(max_length=255, allow_blank=False)
    venue_details = serializers.CharField(max_length=255, allow_blank=False)
    venue_city = serializers.CharField(max_length=255, allow_blank=False)

    def create(self, validated_data):
        """
        Create and return a new `Snippet` instance, given the validated data.
        """
        return VenueList.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """
        Update and return an existing `Snippet` instance, given the validated data.
        """
        # Copy each writable field, keeping the current value when the
        # payload omits it, then persist once.
        for attr in ('venue_name', 'venue_url', 'venue_address',
                     'venue_lat_long', 'venue_contact', 'venue_details',
                     'venue_city'):
            setattr(instance, attr,
                    validated_data.get(attr, getattr(instance, attr)))
        instance.save()
        return instance
class EventListSerializer(serializers.Serializer):
    """Explicit serializer for EventList records, linked to a venue by id."""
    id = serializers.IntegerField(read_only=True)
    venue_id = serializers.IntegerField(allow_null=False)
    event_name = serializers.CharField(max_length=255, allow_blank=False)
    event_price = serializers.DecimalField(allow_null=False, max_digits=6, decimal_places=2)
    event_detail = serializers.CharField(max_length=255, allow_blank=False)
    #time_start_end = serializers.CharField(max_length=255, allow_blank=False)
    # Times render as "HH:MM"; input_formats=None keeps the DRF default.
    event_time_start = serializers.TimeField(format="%H:%M", input_formats=None)
    event_time_end = serializers.TimeField(format="%H:%M", input_formats=None)
    event_url = serializers.CharField(max_length=255, allow_blank=False)
    event_img_url = serializers.CharField(max_length=255, allow_blank=False)
    event_date_time = serializers.DateField(allow_null=False)
    def create(self, validated_data):
        """Create and return a new EventList instance from validated data."""
        return EventList.objects.create(**validated_data)
    def update(self, instance, validated_data):
        """
        Update and return an existing `Snippet` instance, given the validated data.
        """
        # Each field falls back to its current value when absent from the
        # payload, then a single save() persists the changes.
        instance.venue_id = validated_data.get('venue_id', instance.venue_id)
        instance.event_name = validated_data.get('event_name', instance.event_name)
        instance.event_price = validated_data.get('event_price', instance.event_price)
        instance.event_detail = validated_data.get('event_detail', instance.event_detail)
        #instance.time_start_end = validated_data.get('time_start_end', instance.time_start_end)
        instance.event_time_start = validated_data.get('event_time_start', instance.event_time_start)
        instance.event_time_end = validated_data.get('event_time_end', instance.event_time_end)
        instance.event_url = validated_data.get('event_url', instance.event_url)
        instance.event_img_url = validated_data.get('event_img_url', instance.event_img_url)
        instance.event_date_time = validated_data.get('event_date_time', instance.event_date_time)
        instance.save()
        return instance | dennisdarwis/dugem-backend | api/serializers.py | Python | apache-2.0 | 3,888 |
import unittest
class TestCase(unittest.TestCase):
    """Test case base class for all unit tests."""
    # NOTE(review): currently adds no behavior over unittest.TestCase;
    # exists as a single hook point for future shared fixtures.
| bondar-pavel/infoblox-client | infoblox_client/tests/base.py | Python | apache-2.0 | 105 |
#!/usr/bin/python
# Where the git checkouts of glint and horizon are placed.
glint_lib_directory='/var/lib/glint'
# Upstream repositories cloned by the installer (overridable via CLI flags).
horizon_git_repo='https://github.com/rd37/horizon.git'
glint_git_repo='https://github.com/hep-gc/glint.git'
# Install flavour selectors; only the 'default' paths are implemented below.
glint_inst_type='default'
horizon_inst_type='default'
# Which server front-end to configure ('django' is the implemented path).
glint_server='django'
glint_horizon_server='django'
# Source locations for config files and packaged service scripts.
cfg_dir = '/etc/glint'
pkg_dir = 'glint-service'
import sys,subprocess
import glint_platform as plat
from glint_arg_parser import GlintArgumentParser
def proceed(msg):
print msg
input = raw_input()
if input == '' or input == 'y' or input == 'Y':
return True
return False
def execute_command(cmd_args,input):
if input is None:
process = subprocess.Popen(cmd_args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out,err = process.communicate()
else:
#print "Need to use use input"
process = subprocess.Popen(cmd_args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.PIPE)
out,err = process.communicate(input=input)
if err:
print "warning: %s"%err
sys.stdout.flush()
return out,err
def check_dependencies():
print "dependency check: check if git and user glint exist"
[out,err] = execute_command(['which','git'],None)
if "no git" in out:
print "Error, unable to find git tool, please install and attempt glint install again"
return False
[out,err] = execute_command(['grep','glint','/etc/passwd'],None)
if out == '':
print "Warning, unable to find system user glint"
if proceed('Do you wish to setup glint as a User? [Y,n]'):
print "Ok lets setup glint user "
[out,err] = execute_command(['python','glint_system_create_user.py','create-glint-user'],None)
if err:
print "Unable to create glint user"
return False
#print "out: %s"%out
return True
else:
return False
return True
def download_horizon():
print "download horizon using git clone"
[out,err] = execute_command(['git','clone','%s'%horizon_git_repo,'%s/horizon'%glint_lib_directory],None)
if err:
print "Unable to git clone glint-horizon "
return False
print "git clone glint-horizon result %s"%out
return True
def download_glint():
print "download glint using git clone"
[out,err] = execute_command(['git','clone','%s'%glint_git_repo,'%s/glint'%glint_lib_directory],None)
if err:
print "Unable to git clone glint"
return False
print "git clone glint result %s"%out
return True
def install_horizon():
    # Install build prerequisites, set up the horizon virtualenv, open the
    # service port and register glint-horizon as an init.d service.
    print "Install glint-horizon"
    print "Install library pre-reqs"
    # Distro-specific package install; every command answers 'y' to prompts.
    if plat.isRedhat():
        [out,err] = execute_command(['yum','install','libxml2-devel'],'y')
        print out
        [out,err] = execute_command(['yum','install','libxslt-devel'],'y')
        print out
        [out,err] = execute_command(['yum','install','gcc'],'y')
        print out
        [out,err] = execute_command(['yum','install','git-core'],'y')
        print out
        [out,err] = execute_command(['yum','install','python-virtualenv'],'y')
        print out
        [out,err] = execute_command(['yum','install','python-devel'],'y')
        print out
        [out,err] = execute_command(['yum','install','openssl-devel'],'y')
        print out
        [out,err] = execute_command(['yum','install','libffi-devel'],'y')
        print out
    else:
        [out,err] = execute_command(['apt-get','install','libxml2-dev'],'y')
        print out
        [out,err] = execute_command(['apt-get','install','libxslt-dev'],'y')
        print out
        [out,err] = execute_command(['apt-get','install','gcc'],'y')
        print out
        [out,err] = execute_command(['apt-get','install','git-core'],'y')
        print out
        [out,err] = execute_command(['apt-get','install','python-virtualenv'],'y')
        print out
        [out,err] = execute_command(['apt-get','install','python-dev'],'y')
        print out
        [out,err] = execute_command(['apt-get','install','libssl-dev'],'y')
        print out
        [out,err] = execute_command(['apt-get','install','libffi-dev'],'y')
        print out
    # Only the 'default' (virtualenv) flavour is actually implemented.
    if horizon_inst_type == 'default':
        print "Install Horizon using default (virtualenv in /var/lib/glint/horizon/.venv)"
        [out,err] = execute_command(['python','/var/lib/glint/horizon/tools/install_venv.py'],None)
        [out,err] = execute_command(['chown','-R','glint','/var/lib/glint/horizon'],None)
        [out,err] = execute_command(['chgrp','-R','glint','/var/lib/glint/horizon'],None)
    elif horizon_inst_type == 'replace':
        print "Currently Unsupported: Remove openstack-horizon and replace with glint-horizon"
    elif horizon_inst_type == 'contextualize':
        print "Currently Unsupported: Insert or Replace parts of the openstack-horizon installation"
    else:
        print "Unrecognized installation type for glint - %s - error exiting"%horizon_inst_type
        return
    print "IP:Open Port used for glint-horizon ... port 8080, restart networking"
    print "mkdir /var/run/glint and change permissions"
    [out,err] = execute_command(['mkdir','/var/run/glint'],None)
    [out,err] = execute_command(['chown','glint','/var/run/glint'],None)
    [out,err] = execute_command(['chgrp','glint','/var/run/glint'],None)
    # Register the launcher script and the init.d service for django mode.
    if glint_horizon_server == 'django':
        print "Setup /usr/bin/glint-horizon as main system start application (reads cfg file for gl-hor location)"
        #copy glint-horizon from /var/lib/glint/horizon to /usr/bin/glint-horizon
        [out,err] = execute_command(['cp','%s/glint-horizon'%pkg_dir,'/usr/bin/.'],None)
        [out,err] = execute_command(['chmod','755','/usr/bin/glint-horizon'],None)
        print "Setup /etc/init.d/glint-horizon as a service"
        [out,err] = execute_command(['cp','%s/openstack-glint-horizon'%pkg_dir,'/etc/init.d/.'],None)
        [out,err] = execute_command(['chmod','755','/etc/init.d/openstack-glint-horizon'],None)
    elif glint_horizon_server == 'apache':
        # NOTE(review): "Unsupprted" typo below is user-visible output;
        # left untouched here because this edit changes comments only.
        print "Currently Unsupprted: Register glint-horizon with local apache this is used by /user/bin/glint-horizon to start stop the apache app"
        print "Currently Unsupported: Setup /usr/bin/glint-horizon as main system start application (reads cfg file for gl-hor location)"
        print "Currently Unsupported: Setup /etc/init.d/glint-horizon as a service"
def install_glint():
    # Fix ownership of the glint checkout, create its log directory,
    # install the service config and register glint as an init.d service.
    print "Install glint"
    if glint_inst_type == 'default':
        print "Leave glint in /var/lib/glint/glint, but change own and group to glint"
        [out,err] = execute_command(['chown','-R','glint','/var/lib/glint/glint'],None)
        [out,err] = execute_command(['chgrp','-R','glint','/var/lib/glint/glint'],None)
    elif glint_inst_type == 'local':
        print "Currently Unsupported: Install glint into sites-packages - use setup.py"
    else:
        print "Unrecognized installation type for glint - %s - error exiting"%glint_inst_type
        return
    print "IP:Open Glint Port 9494 and restart networking"
    print "mkdir /var/run/glint and change permissions"
    [out,err] = execute_command(['mkdir','/var/log/glint-service'],None)
    [out,err] = execute_command(['chown','glint','/var/log/glint-service'],None)
    [out,err] = execute_command(['chgrp','glint','/var/log/glint-service'],None)
    print "copy glint service yaml conf file"
    [out,err] = execute_command(['cp','%s/glint_services.yaml'%cfg_dir,'/var/lib/glint/glint/.'],None)
    [out,err] = execute_command(['chown','glint:glint','/var/lib/glint/glint/glint_services.yaml'],None)
    # Register launcher and init.d service for the django test server mode.
    if glint_server == 'django':
        print "Setup /usr/bin/glint as main start of glint server from installed (either /var/lib or site-packeges) using django test server"
        [out,err] = execute_command(['cp','%s/glint'%pkg_dir,'/usr/bin/.'],None)
        [out,err] = execute_command(['chmod','755','/usr/bin/glint'],None)
        print "Setup /etc/init.d/glint as a service "
        [out,err] = execute_command(['cp','%s/openstack-glint'%pkg_dir,'/etc/init.d/.'],None)
        [out,err] = execute_command(['chmod','755','/etc/init.d/openstack-glint'],None)
    elif glint_server == 'paste':
        print "Currently Unsupported: Setup /usr/bin/glint as main start of glint server from installed (either /var/lib or site-packeges) using django test server"
        print "Currently Unsupported: Setup /etc/init.d/glint as a service "
def uninstall_horizon():
    # Remove the glint-horizon launcher and init.d service; the virtualenv
    # and port teardown are only announced, not performed.
    print "Uninstall glint-horizon"
    print "Stop glint-horizon service and remove it"
    [out,err] = execute_command(['rm','/etc/init.d/openstack-glint-horizon'],None)
    print "Remove /usr/bin/glint-horizon script"
    [out,err] = execute_command(['rm','/usr/bin/glint-horizon'],None)
    if glint_horizon_server == 'django':
        print "Nothing to Do for django server"
    elif glint_horizon_server == 'apache':
        print "Currently Unsupported:Remove glint-horizon from apache"
    print "IP:Close port used by glint-horizon"
    if horizon_inst_type == 'default':
        print "IP: UNInstall Horizon using default (virtualenv in /var/lib/glint/horizon/.venv)"
    elif horizon_inst_type == 'replace':
        print "Currently Unsupported: Remove glint-horizon and replace with openstack-horizon"
    elif horizon_inst_type == 'contextualize':
        print "Currently Unsupported: Revert changes to parts of the openstack-horizon installation"
def uninstall_glint():
    # Remove the glint init.d service, launcher script and log directory.
    print "uninstall glint"
    if glint_inst_type == 'default':
        print "Default Unistall - nothing to do here"
    elif glint_inst_type == 'local':
        print "Currently Unsupported: remove glint from sites-packages - use setup.py"
    print "Remove /etc/init.d/openstack-glint "
    [out,err] = execute_command(['rm','/etc/init.d/openstack-glint'],None)
    print "Remove /usr/bin/glint script"
    [out,err] = execute_command(['rm','/usr/bin/glint'],None)
    print "Remove log directory"
    # NOTE(review): plain 'rm' on a directory will fail; 'rm -rf' is likely
    # intended here — confirm before changing behavior.
    [out,err] = execute_command(['rm','/var/log/glint-service'],None)
    print "IP: Shutdown Glint Port 9494 and restart networking"
########### Uninstalling glint and and glint-horizon
def remove_glint():
print "Try Removing Glint Git Repository"
[out,err] = execute_command(['rm','-rf','/var/lib/glint/glint'],None)
def remove_glint_horizon():
print "Try Removing Glint-Horizon Git Repository"
[out,err] = execute_command(['rm','-rf','/var/lib/glint/horizon'],None)
########### Main Func
gap = GlintArgumentParser()
gap.init_git_arg_parser()
args = gap.parser.parse_args()
print args
if args.install is not None:
if args.glint_url is not None:
glint_git_repo = args.glint_url
if args.glint_hor_url is not None:
horizon_git_repo = args.glint_hor_url
if args.glint_inst_type is not None:
glint_inst_type = args.glint_inst_type
if args.hor_inst_type is not None:
horizon_inst_type = args.hor_inst_type
if args.glint_server is not None:
glint_server = args.glint_server[0]
if args.glint_horizon_server is not None:
glint_horizon_server = args.glint_horizon_server[0]
if check_dependencies():
print "Git and User Glint are OK ... moving along"
if args.install[0] == 'all':
download_horizon()
download_glint()
install_horizon()
install_glint()
elif args.install[0] == 'glint':
download_glint()
install_glint()
elif args.install[0] == 'horizon':
download_horizon()
install_horizon()
else:
print "Check your Setup, system requirements are the git tool and user glint to exist"
elif args.uninstall is not None:
if args.uninstall[0] == 'all':
uninstall_horizon()
uninstall_glint()
remove_glint()
remove_glint_horizon()
elif args.install[0] == 'glint':
uninstall_glint()
remove_glint()
elif args.install[0] == 'horizon':
uninstall_horizon()
remove_glint_horizon()
| hep-gc/glint-service | glint-service/glint_git_setup.py | Python | apache-2.0 | 12,100 |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils used in artifact and ontology_artifact classes."""
from typing import Any, Dict, Tuple
import enum
import jsonschema
import os
import yaml
class SchemaFieldType(enum.Enum):
    """Supported Schema field types."""
    # Enum values match the 'type' strings accepted in property
    # definitions by parse_schema().
    NUMBER = 'number'
    INTEGER = 'integer'
    STRING = 'string'
    BOOL = 'bool'
    OBJECT = 'object'
    ARRAY = 'array'
def parse_schema(yaml_schema: str) -> Tuple[str, Dict[str, SchemaFieldType]]:
    """Parse a yaml schema and map its properties to field types.

    Args:
        yaml_schema: Yaml schema to be parsed.

    Returns:
        A (title, properties) pair where *properties* maps each declared
        field name to its SchemaFieldType.

    Raises:
        ValueError: If no title is set in the schema, or a property
            declares a type not defined in SchemaFieldType.
    """
    schema = yaml.full_load(yaml_schema)
    if 'title' not in schema:
        raise ValueError('Invalid _schema, title must be set. \
            Got: {}'.format(yaml_schema))
    field_types: Dict[str, SchemaFieldType] = {}
    if 'properties' in schema:
        # A 'properties:' key with an empty body loads as None.
        declared = schema['properties'] or {}
        for field_name, field_def in declared.items():
            try:
                field_types[field_name] = SchemaFieldType(field_def['type'])
            except ValueError:
                raise ValueError('Unsupported type:{} specified for field: {} \
                    in schema'.format(field_def['type'], field_name))
    return schema['title'], field_types
def verify_schema_instance(schema: str, instance: Dict[str, Any]):
    """Verifies the instance is well-formed against the schema.

    Args:
        schema: Yaml schema to use for verification.
        instance: Object represented as a Dict to be verified.

    Raises:
        RuntimeError: If the schema itself is malformed, or the instance
            is invalid against the schema.
    """
    # An empty instance has nothing to validate.
    if len(instance) == 0:
        return
    try:
        jsonschema.validate(instance=instance, schema=yaml.full_load(schema))
    except jsonschema.exceptions.SchemaError:
        raise RuntimeError('Invalid schema schema: {} used for \
            verification'.format(schema))
    except jsonschema.exceptions.ValidationError:
        raise RuntimeError('Invalid values set: {} in object for schema: \
            {}'.format(instance, schema))
def read_schema_file(schema_file: str) -> str:
    """Reads a yaml schema from the type_schemas folder.

    Args:
        schema_file: Name of the file to read the schema from.

    Returns:
        The full text content of the schema file.
    """
    schema_file_path = os.path.join(
        os.path.dirname(__file__), 'type_schemas', schema_file)
    # Fix: the original reused the name `schema_file` for the open file
    # handle, shadowing the parameter; use a distinct name instead.
    with open(schema_file_path) as schema_fp:
        return schema_fp.read()
| kubeflow/pipelines | sdk/python/kfp/deprecated/dsl/artifact_utils.py | Python | apache-2.0 | 3,439 |
from tests.base import TestBase
from pascal.program import Program
class TestVariables(TestBase):
    """Smoke tests running valid Pascal variable sources through Program."""

    def test_pass_valid_var(self):
        source = "tests/mock_pas/all_var.pas"
        program = Program(source)
        program.run()
        # The fixture declares 7 symbols occupying 23 address units.
        self.assertEqual(len(program.symbol_table), 7)
        self.assertEqual(program.symbol_address, 23)

    def test_pass_assign(self):
        # Must compile and run without raising.
        program = Program("tests/mock_pas/variables.pas")
        program.run()
| TheLampshady/pascompiler | tests/test_variables.py | Python | apache-2.0 | 537 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2018-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import os.path
import edgedb
from edb.testbase import server as tb
class TestEdgeQLCasts(tb.QueryTestCase):
'''Testing symmetry and validity of casts.
Scalar casting is symmetric in the sense that if casting scalar
type X into Y is valid then it is also valid to cast Y into X.
Some casts are lossless. A cast from X into Y is lossless if all
the relevant details of the value of type X can be unambiguously
represented by a value of type Y. Examples of lossless casts:
- any scalar can be losslessly cast into a str
- int16 and int32 can be losslessly cast into int64
- int16 can be losslessly cast into float32
- any numeric type can be losslessly cast into a decimal
Sometimes only specific values (a subset of the entire domain of
the scalar) can be cast losslessly:
- 2147299968 can be cast losslessly into a float32, but not 2147299969
- decimal 2.5 can be cast losslessly into a float32, but not decimal
2.5000000001
Consider two types X and Y with corresponding values x and y.
If x can be losslessly cast into Y, then casting it back is also lossless:
x = <X><Y>x
'''
# FIXME: a special schema should be used here since we need to
# cover all known scalars and even some arrays and tuples.
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'casts.esdl')
SETUP = os.path.join(os.path.dirname(__file__), 'schemas',
'casts_setup.edgeql')
# NOTE: nothing can be cast into bytes
    async def test_edgeql_casts_bytes_01(self):
        # Compile-time error: bool does not cast to bytes.
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>True;
            """)
    async def test_edgeql_casts_bytes_02(self):
        # Compile-time error: uuid does not cast to bytes.
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>uuid_generate_v1mc();
            """)
    async def test_edgeql_casts_bytes_03(self):
        # Compile-time error: str does not cast to bytes.
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>'Hello';
            """)
async def test_edgeql_casts_bytes_04(self):
async with self.assertRaisesRegexTx(
edgedb.InvalidValueError, r'expected JSON string or null'):
await self.con.query_single("""SELECT <bytes>to_json('1');"""),
self.assertEqual(
await self.con.query_single(r'''
SELECT <bytes>to_json('"aGVsbG8="');
'''),
b'hello',
)
async with self.assertRaisesRegexTx(
edgedb.InvalidValueError, r'invalid symbol'):
await self.con.query_single("""
SELECT <bytes>to_json('"not base64!"');
""")
async with self.assertRaisesRegexTx(
edgedb.InvalidValueError, r'invalid base64 end sequence'):
await self.con.query_single("""
SELECT <bytes>to_json('"a"');
""")
    async def test_edgeql_casts_bytes_05(self):
        # Tests 05-16 assert that each remaining scalar type (temporal,
        # duration, integer, float, decimal, bigint) is rejected at
        # compile time when cast to bytes.
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>datetime_current();
            """)
    async def test_edgeql_casts_bytes_06(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT
                    <bytes>cal::to_local_datetime('2018-05-07T20:01:22.306916');
            """)
    async def test_edgeql_casts_bytes_07(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>cal::to_local_date('2018-05-07');
            """)
    async def test_edgeql_casts_bytes_08(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>cal::to_local_time('20:01:22.306916');
            """)
    async def test_edgeql_casts_bytes_09(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>to_duration(hours:=20);
            """)
    async def test_edgeql_casts_bytes_10(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>to_int16('2');
            """)
    async def test_edgeql_casts_bytes_11(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>to_int32('2');
            """)
    async def test_edgeql_casts_bytes_12(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>to_int64('2');
            """)
    async def test_edgeql_casts_bytes_13(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>to_float32('2');
            """)
    async def test_edgeql_casts_bytes_14(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>to_float64('2');
            """)
    async def test_edgeql_casts_bytes_15(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>to_decimal('2');
            """)
    async def test_edgeql_casts_bytes_16(self):
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast'):
            await self.con.execute("""
                SELECT <bytes>to_bigint('2');
            """)
    # NOTE: casts are idempotent
    async def test_edgeql_casts_idempotence_01(self):
        """Casting a value to its own type must preserve the type."""
        await self.assert_query_result(
            r'''SELECT <bool><bool>True IS bool;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <bytes><bytes>b'Hello' IS bytes;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <str><str>'Hello' IS str;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <json><json>to_json('1') IS json;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <uuid><uuid>uuid_generate_v1mc() IS uuid;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <datetime><datetime>datetime_current() IS datetime;''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_datetime><cal::local_datetime>
                    cal::to_local_datetime(
                        '2018-05-07T20:01:22.306916') IS cal::local_datetime;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_date><cal::local_date>cal::to_local_date(
                    '2018-05-07') IS cal::local_date;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_time><cal::local_time>cal::to_local_time(
                    '20:01:22.306916') IS cal::local_time;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <duration><duration>to_duration(
                    hours:=20) IS duration;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int16><int16>to_int16('12345') IS int16;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int32><int32>to_int32('1234567890') IS int32;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int64><int64>to_int64('1234567890123') IS int64;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <float32><float32>to_float32('2.5') IS float32;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <float64><float64>to_float64('2.5') IS float64;''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <bigint><bigint>to_bigint(
                    '123456789123456789123456789')
                    IS bigint;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <decimal><decimal>to_decimal(
                    '123456789123456789123456789.123456789123456789123456789')
                    IS decimal;
            ''',
            [True],
        )
    async def test_edgeql_casts_idempotence_02(self):
        """Casting a value to its own type must preserve the value."""
        await self.assert_query_result(
            r'''SELECT <bool><bool>True = True;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <bytes><bytes>b'Hello' = b'Hello';''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <str><str>'Hello' = 'Hello';''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <json><json>to_json('1') = to_json('1');''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH U := uuid_generate_v1mc()
                SELECT <uuid><uuid>U = U;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <datetime><datetime>datetime_of_statement() =
                    datetime_of_statement();
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_datetime><cal::local_datetime>
                        cal::to_local_datetime('2018-05-07T20:01:22.306916') =
                    cal::to_local_datetime('2018-05-07T20:01:22.306916');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_date><cal::local_date>
                        cal::to_local_date('2018-05-07') =
                    cal::to_local_date('2018-05-07');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_time><cal::local_time>cal::to_local_time(
                    '20:01:22.306916') = cal::to_local_time('20:01:22.306916');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <duration><duration>to_duration(hours:=20) =
                    to_duration(hours:=20);
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int16><int16>to_int16('12345') = 12345;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int32><int32>to_int32('1234567890') = 1234567890;''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <int64><int64>to_int64('1234567890123') =
                    1234567890123;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <float32><float32>to_float32('2.5') = 2.5;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <float64><float64>to_float64('2.5') = 2.5;''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <bigint><bigint>to_bigint(
                    '123456789123456789123456789')
                    = to_bigint(
                    '123456789123456789123456789');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <decimal><decimal>to_decimal(
                    '123456789123456789123456789.123456789123456789123456789')
                    = to_decimal(
                    '123456789123456789123456789.123456789123456789123456789');
            ''',
            [True],
        )
    async def test_edgeql_casts_str_01(self):
        """Round-trip every str-castable scalar through str losslessly."""
        # Casting to str and back is lossless for every scalar (if
        # legal). It's still not legal to cast bytes into str or some
        # of the json values.
        await self.assert_query_result(
            r'''SELECT <bool><str>True = True;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <bool><str>False = False;''',
            [True],
        )
        # only JSON strings can be cast into EdgeQL str
        await self.assert_query_result(
            r'''SELECT <json><str>to_json('"Hello"') = to_json('"Hello"');''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH U := uuid_generate_v1mc()
                SELECT <uuid><str>U = U;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <datetime><str>datetime_of_statement() =
                    datetime_of_statement();
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_datetime><str>cal::to_local_datetime(
                    '2018-05-07T20:01:22.306916') =
                    cal::to_local_datetime('2018-05-07T20:01:22.306916');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_date><str>cal::to_local_date('2018-05-07') =
                    cal::to_local_date('2018-05-07');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_time><str>
                    cal::to_local_time('20:01:22.306916') =
                    cal::to_local_time('20:01:22.306916');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <duration><str>to_duration(hours:=20) =
                    to_duration(hours:=20);
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int16><str>to_int16('12345') = 12345;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int32><str>to_int32('1234567890') = 1234567890;''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <int64><str>to_int64(
                    '1234567890123') = 1234567890123;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <float32><str>to_float32('2.5') = 2.5;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <float64><str>to_float64('2.5') = 2.5;''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <bigint><str>to_bigint(
                        '123456789123456789123456789')
                    = to_bigint(
                        '123456789123456789123456789');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <decimal><str>to_decimal(
                        '123456789123456789123456789.123456789123456789123456789')
                    = to_decimal(
                        '123456789123456789123456789.123456789123456789123456789');
            ''',
            [True],
        )
    async def test_edgeql_casts_str_02(self):
        """Only canonical bool strings round-trip; others are rejected."""
        # Certain strings can be cast into other types losslessly,
        # making them "canonical" string representations of those
        # values.
        await self.assert_query_result(
            r'''
                WITH x := {'true', 'false'}
                SELECT <str><bool>x = x;
            ''',
            [True, True],
        )
        # Mixed-case and padded variants parse, but do not round-trip
        # back to the original spelling.
        await self.assert_query_result(
            r'''
                WITH x := {'True', 'False', 'TRUE', 'FALSE', ' TrUe '}
                SELECT <str><bool>x = x;
            ''',
            [False, False, False, False, False],
        )
        await self.assert_query_result(
            r'''
                WITH x := {'True', 'False', 'TRUE', 'FALSE', 'TrUe'}
                SELECT <str><bool>x = str_lower(x);
            ''',
            [True, True, True, True, True],
        )
        # Anything that is not a (case-insensitive) 'true'/'false' is
        # a syntax error for bool.
        for variant in {'😈', 'yes', '1', 'no', 'on', 'OFF',
                        't', 'f', 'tr', 'fa'}:
            async with self.assertRaisesRegexTx(
                    edgedb.InvalidValueError,
                    fr"invalid syntax for std::bool: '{variant}'"):
                await self.con.query_single(f'SELECT <bool>"{variant}"')
        # Surrounding whitespace is trimmed before parsing.
        self.assertTrue(
            await self.con.query_single('SELECT <bool>" TruE "'))
        self.assertFalse(
            await self.con.query_single('SELECT <bool>" FalsE "'))
    async def test_edgeql_casts_str_03(self):
        """Casting str to json and back is always lossless."""
        # str to json is always lossless
        await self.assert_query_result(
            r'''
                WITH x := {'any', 'arbitrary', '♠gibberish♠'}
                SELECT <str><json>x = x;
            ''',
            [True, True, True],
        )
    async def test_edgeql_casts_str_04(self):
        """The canonical uuid string representation is lowercase."""
        # canonical uuid representation as a string is using lowercase
        await self.assert_query_result(
            r'''
                WITH x := 'd4288330-eea3-11e8-bc5f-7faf132b1d84'
                SELECT <str><uuid>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            # non-canonical
            r'''
                WITH x := {
                    'D4288330-EEA3-11E8-BC5F-7FAF132B1D84',
                    'D4288330-Eea3-11E8-Bc5F-7Faf132B1D84',
                    'D4288330-eea3-11e8-bc5f-7faf132b1d84',
                }
                SELECT <str><uuid>x = x;
            ''',
            [False, False, False],
        )
        # lowering the input first makes the round trip stable
        await self.assert_query_result(
            r'''
                WITH x := {
                    'D4288330-EEA3-11E8-BC5F-7FAF132B1D84',
                    'D4288330-Eea3-11E8-Bc5F-7Faf132B1D84',
                    'D4288330-eea3-11e8-bc5f-7faf132b1d84',
                }
                SELECT <str><uuid>x = str_lower(x);
            ''',
            [True, True, True],
        )
    async def test_edgeql_casts_str_05(self):
        """datetime's canonical string form is ISO 8601 (UTC server TZ)."""
        # Canonical date and time str representations must follow ISO
        # 8601. This test assumes that the server is configured to be
        # in UTC time zone.
        await self.assert_query_result(
            r'''
                WITH x := '2018-05-07T20:01:22.306916+00:00'
                SELECT <str><datetime>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same datetime
            r'''
                WITH x := {
                    '2018-05-07T15:01:22.306916-05:00',
                    '2018-05-07T15:01:22.306916-05',
                    '2018-05-07T20:01:22.306916Z',
                    '2018-05-07T20:01:22.306916+0000',
                    '2018-05-07T20:01:22.306916+00',
                    # the '-' and ':' separators may be omitted
                    '20180507T200122.306916+00',
                    # acceptable RFC 3339
                    '2018-05-07 20:01:22.306916+00:00',
                    '2018-05-07t20:01:22.306916z',
                }
                SELECT <datetime>x =
                    <datetime>'2018-05-07T20:01:22.306916+00:00';
            ''',
            [True, True, True, True, True, True, True, True],
        )
        # Bad separators, missing TZ, and named-zone suffixes are all
        # rejected.
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime>"2018-05-07;20:01:22.306916+00:00"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime>"2018-05-07T20:01:22.306916"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime>"2018-05-07T20:01:22.306916 1000"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime>"2018-05-07T20:01:22.306916 US/Central"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime>"2018-05-07T20:01:22.306916 +GMT1"')
    async def test_edgeql_casts_str_06(self):
        """cal::local_datetime's canonical string form is ISO 8601."""
        # Canonical date and time str representations must follow ISO
        # 8601. This test assumes that the server is configured to be
        # in UTC time zone.
        await self.assert_query_result(
            r'''
                WITH x := '2018-05-07T20:01:22.306916'
                SELECT <str><cal::local_datetime>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same datetime
            r'''
                WITH x := {
                    # the '-' and ':' separators may be omitted
                    '20180507T200122.306916',
                    # acceptable RFC 3339
                    '2018-05-07 20:01:22.306916',
                    '2018-05-07t20:01:22.306916',
                }
                SELECT <cal::local_datetime>x =
                    <cal::local_datetime>'2018-05-07T20:01:22.306916';
            ''',
            [True, True, True],
        )
        # Bad separators and any timezone information are rejected —
        # a local_datetime is timezone-naive by definition.
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_datetime>"2018-05-07;20:01:22.306916"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                '''
                SELECT
                  <cal::local_datetime>"2018-05-07T20:01:22.306916+01:00"
                ''')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_datetime>"2018-05-07T20:01:22.306916 GMT"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                '''
                SELECT
                  <cal::local_datetime>"2018-05-07T20:01:22.306916 GMT0"
                ''')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                '''SELECT <cal::local_datetime>
                    "2018-05-07T20:01:22.306916 US/Central"
                ''')
    async def test_edgeql_casts_str_07(self):
        """cal::local_date's canonical string form is ISO 8601."""
        # Canonical date and time str representations must follow ISO
        # 8601.
        await self.assert_query_result(
            r'''
                WITH x := '2018-05-07'
                SELECT <str><cal::local_date>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same date
            r'''
                WITH x := {
                    # the '-' separators may be omitted
                    '20180507',
                }
                SELECT <cal::local_date>x = <cal::local_date>'2018-05-07';
            ''',
            [True],
        )
        # Time components, non-ISO separators, and timezone offsets
        # are all rejected for a bare date.
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_date>"2018-05-07T20:01:22.306916"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_date>"2018/05/07"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_date>"2018.05.07"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_date>"2018-05-07+01:00"')
    async def test_edgeql_casts_str_08(self):
        """cal::local_time's canonical string form is ISO 8601."""
        # Canonical date and time str representations must follow ISO
        # 8601.
        await self.assert_query_result(
            r'''
                WITH x := '20:01:22.306916'
                SELECT <str><cal::local_time>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH x := {
                    '20:01',
                    '20:01:00',
                    # the ':' separators may be omitted
                    '2001',
                    '200100',
                }
                SELECT <cal::local_time>x = <cal::local_time>'20:01:00';
            ''',
            [True, True, True, True],
        )
        # Date components and timezone offsets are rejected.
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                'invalid input syntax for type cal::local_time'):
            await self.con.query_single(
                "SELECT <cal::local_time>'2018-05-07 20:01:22'")
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_time>"20:01:22.306916+01:00"')
    async def test_edgeql_casts_str_09(self):
        """duration's canonical string form is the ISO 8601 'PT...' style."""
        # Canonical duration
        await self.assert_query_result(
            r'''
                WITH x := 'PT20H1M22.306916S'
                SELECT <str><duration>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            # non-canonical
            r'''
                WITH x := {
                    '20:01:22.306916',
                    '20h 1m 22.306916s',
                    '20 hours 1 minute 22.306916 seconds',
                    '72082.306916', # the duration in seconds
                    '0.834285959675926 days',
                }
                SELECT <str><duration>x = x;
            ''',
            [False, False, False, False, False],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same duration
            r'''
                WITH x := {
                    '20:01:22.306916',
                    '20h 1m 22.306916s',
                    '20 hours 1 minute 22.306916 seconds',
                    '72082.306916', # the duration in seconds
                    '0.834285959675926 days',
                }
                SELECT <duration>x = <duration>'PT20H1M22.306916S';
            ''',
            [True, True, True, True, True],
        )
    async def test_edgeql_casts_str_10(self):
        """str<->int casts are lossless; input whitespace is trimmed."""
        # valid casts from str to any integer is lossless, as long as
        # there's no whitespace, which is trimmed
        await self.assert_query_result(
            r'''
                WITH x := {'-20', '0', '7', '12345'}
                SELECT <str><int16>x = x;
            ''',
            [True, True, True, True],
        )
        await self.assert_query_result(
            r'''
                WITH x := {'-20', '0', '7', '12345'}
                SELECT <str><int32>x = x;
            ''',
            [True, True, True, True],
        )
        await self.assert_query_result(
            r'''
                WITH x := {'-20', '0', '7', '12345'}
                SELECT <str><int64>x = x;
            ''',
            [True, True, True, True],
        )
        await self.assert_query_result(
            # with whitespace
            r'''
                WITH x := {
                    '       42',
                    '42     ',
                    '   42   ',
                }
                SELECT <str><int16>x = x;
            ''',
            [False, False, False],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same value
            r'''
                WITH x := {
                    '       42',
                    '42     ',
                    '   42   ',
                }
                SELECT <int16>x = 42;
            ''',
            [True, True, True],
        )
    async def test_edgeql_casts_str_11(self):
        """str<->float casts are lossy outside trivial representations."""
        # There's too many ways of representing floats. Outside of
        # trivial 1-2 digit cases, relying on any str being
        # "canonical" is not safe, making most casts from str to float
        # lossy.
        await self.assert_query_result(
            r'''
                WITH x := {'-20', '0', '7.2'}
                SELECT <str><float32>x = x;
            ''',
            [True, True, True],
        )
        await self.assert_query_result(
            r'''
                WITH x := {'-20', '0', '7.2'}
                SELECT <str><float64>x = x;
            ''',
            [True, True, True],
        )
        await self.assert_query_result(
            # non-canonical
            r'''
                WITH x := {
                    '0.0000000001234',
                    '1234E-13',
                    '0.1234e-9',
                }
                SELECT <str><float32>x = x;
            ''',
            [False, False, False],
        )
        await self.assert_query_result(
            r'''
                WITH x := {
                    '0.0000000001234',
                    '1234E-13',
                    '0.1234e-9',
                }
                SELECT <str><float64>x = x;
            ''',
            [False, False, False],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same value
            r'''
                WITH x := {
                    '0.0000000001234',
                    '1234E-13',
                    '0.1234e-9',
                }
                SELECT <float64>x = 1234e-13;
            ''',
            [True, True, True],
        )
    async def test_edgeql_casts_str_12(self):
        """decimal's canonical string form avoids scientific notation."""
        # The canonical string representation of decimals is without
        # use of scientific notation.
        await self.assert_query_result(
            r'''
                WITH x := {
                    '-20', '0', '7.2', '0.0000000001234', '1234.00000001234'
                }
                SELECT <str><decimal>x = x;
            ''',
            [True, True, True, True, True],
        )
        await self.assert_query_result(
            # non-canonical
            r'''
                WITH x := {
                    '1234E-13',
                    '0.1234e-9',
                }
                SELECT <str><decimal>x = x;
            ''',
            [False, False],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same date
            r'''
                WITH x := {
                    '1234E-13',
                    '0.1234e-9',
                }
                SELECT <decimal>x = <decimal>'0.0000000001234';
            ''',
            [True, True],
        )
    async def test_edgeql_casts_str_13(self):
        """Round-trip stored object properties through str losslessly."""
        # Casting to str and back is lossless for every scalar (if
        # legal). It's still not legal to cast bytes into str or some
        # of the json values.
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <uuid><str>T.id = T.id;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <bool><str>T.p_bool = T.p_bool;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <str><str>T.p_str = T.p_str;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <datetime><str>T.p_datetime = T.p_datetime;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <cal::local_datetime><str>T.p_local_datetime =
                    T.p_local_datetime;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <cal::local_date><str>T.p_local_date = T.p_local_date;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <cal::local_time><str>T.p_local_time = T.p_local_time;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <duration><str>T.p_duration = T.p_duration;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <int16><str>T.p_int16 = T.p_int16;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <int32><str>T.p_int32 = T.p_int32;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <int64><str>T.p_int64 = T.p_int64;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <float32><str>T.p_float32 = T.p_float32;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <float64><str>T.p_float64 = T.p_float64;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <bigint><str>T.p_bigint = T.p_bigint;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <decimal><str>T.p_decimal = T.p_decimal;
            ''',
            [True],
        )
    async def test_edgeql_casts_numeric_01(self):
        """Integers survive a round trip through bigint and decimal."""
        # Casting to decimal and back should be lossless for any other
        # integer type.
        for numtype in {'bigint', 'decimal'}:
            await self.assert_query_result(
                # technically we're already casting a literal int64
                # to int16 first
                f'''
                    WITH x := <int16>{{-32768, -32767, -100,
                                      0, 13, 32766, 32767}}
                    SELECT <int16><{numtype}>x = x;
                ''',
                [True, True, True, True, True, True, True],
            )
            await self.assert_query_result(
                # technically we're already casting a literal int64
                # to int32 first
                f'''
                    WITH x := <int32>{{-2147483648, -2147483647, -65536, -100,
                                      0, 13, 32768, 2147483646, 2147483647}}
                    SELECT <int32><{numtype}>x = x;
                ''',
                [True, True, True, True, True, True, True, True, True],
            )
            await self.assert_query_result(
                f'''
                    WITH x := <int64>{{
                        -9223372036854775808,
                        -9223372036854775807,
                        -4294967296,
                        -65536,
                        -100,
                        0,
                        13,
                        65536,
                        4294967296,
                        9223372036854775806,
                        9223372036854775807
                    }}
                    SELECT <int64><{numtype}>x = x;
                ''',
                [True, True, True, True, True, True,
                 True, True, True, True, True],
            )
    async def test_edgeql_casts_numeric_02(self):
        """Low-precision floats survive a round trip through decimal."""
        # Casting to decimal and back should be lossless for any other
        # float type of low precision (a couple of digits less than
        # the maximum possible float precision).
        await self.assert_query_result(
            # technically we're already casting a literal int64 or
            # float64 to float32 first
            r'''
                WITH x := <float32>{-3.31234e+38, -1.234e+12, -1.234e-12,
                                    -100, 0, 13, 1.234e-12, 1.234e+12, 3.4e+38}
                SELECT <float32><decimal>x = x;
            ''',
            [True, True, True, True, True, True, True, True, True],
        )
        await self.assert_query_result(
            r'''
                WITH x := <float64>{-1.61234e+308, -1.234e+42, -1.234e-42,
                                    -100, 0, 13, 1.234e-42, 1.234e+42,
                                    1.7e+308}
                SELECT <float64><decimal>x = x;
            ''',
            [True, True, True, True, True, True, True, True, True],
        )
    async def test_edgeql_casts_numeric_03(self):
        """int32 -> float32 -> int32 silently mangles values above 2^24."""
        # It is especially dangerous to cast an int32 into float32 and
        # back because float32 cannot losslessly represent the entire
        # range of int32, but it can represent some of it, so no
        # obvious errors would be raised (as any int32 value is
        # technically withing valid range of float32), but the value
        # could be mangled.
        await self.assert_query_result(
            # ints <= 2^24 can be represented exactly in a float32
            r'''
                WITH x := <int32>{16777216, 16777215, 16777214,
                                  1677721, 167772, 16777}
                SELECT <int32><float32>x = x;
            ''',
            [True, True, True, True, True, True],
        )
        await self.assert_query_result(
            # max int32 -100, -1000
            r'''
                WITH x := <int32>{2147483548, 2147482648}
                SELECT <int32><float32>x = x;
            ''',
            [False, False],
        )
        # the actual mangled values after the round trip
        await self.assert_query_result(
            r'''
                WITH x := <int32>{2147483548, 2147482648}
                SELECT <int32><float32>x;
            ''',
            [2147483520, 2147482624],
        )
    async def test_edgeql_casts_numeric_04(self):
        """int32 -> float64 -> int32 is exact across the whole range."""
        await self.assert_query_result(
            # ints <= 2^24 can be represented exactly in a float32
            r'''
                WITH x := <int32>{16777216, 16777215, 16777214,
                                  1677721, 167772, 16777}
                SELECT <int32><float64>x = x;
            ''',
            [True, True, True, True, True, True],
        )
        await self.assert_query_result(
            # max int32 -1, -2, -3, -10, -100, -1000
            r'''
                WITH x := <int32>{2147483647, 2147483646, 2147483645,
                                  2147483638, 2147483548, 2147482648}
                SELECT <int32><float64>x = x;
            ''',
            [True, True, True, True, True, True],
        )
async def test_edgeql_casts_numeric_05(self):
# Due to the sparseness of float values large integers may not
# be representable exactly if they require better precision
# than float provides.
await self.assert_query_result(
r'''
# 2^31 -1, -2, -3, -10
WITH x := <int32>{2147483647, 2147483646, 2147483645,
2147483638}
# 2147483647 is the max int32
SELECT x <= <int32>2147483647;
''',
[True, True, True, True],
)
async with self.assertRaisesRegexTx(
edgedb.NumericOutOfRangeError, r"std::int32 out of range"):
async with self.con.transaction():
await self.con.execute("""
SELECT <int32><float32><int32>2147483647;
""")
async with self.assertRaisesRegexTx(
edgedb.NumericOutOfRangeError, r"std::int32 out of range"):
async with self.con.transaction():
await self.con.execute("""
SELECT <int32><float32><int32>2147483646;
""")
async with self.assertRaisesRegexTx(
edgedb.NumericOutOfRangeError, r"std::int32 out of range"):
async with self.con.transaction():
await self.con.execute("""
SELECT <int32><float32><int32>2147483645;
""")
async with self.assertRaisesRegexTx(
edgedb.NumericOutOfRangeError, r"std::int32 out of range"):
async with self.con.transaction():
await self.con.execute("""
SELECT <int32><float32><int32>2147483638;
""")
async def test_edgeql_casts_numeric_06(self):
await self.assert_query_result(
r'''SELECT <int16>1;''',
[1],
)
await self.assert_query_result(
r'''SELECT <int32>1;''',
[1],
)
await self.assert_query_result(
r'''SELECT <int64>1;''',
[1],
)
await self.assert_query_result(
r'''SELECT <float32>1;''',
[1.0],
)
await self.assert_query_result(
r'''SELECT <float64>1;''',
[1.0],
)
await self.assert_query_result(
r'''SELECT <bigint>1;''',
[1],
)
await self.assert_query_result(
r'''SELECT <decimal>1;''',
[1],
)
    async def test_edgeql_casts_numeric_07(self):
        """Every numeric type casts into every other numeric type."""
        numerics = ['int16', 'int32', 'int64', 'float32', 'float64', 'bigint',
                    'decimal']
        # exercise all ordered pairs, including each type onto itself
        for t1, t2 in itertools.product(numerics, numerics):
            await self.assert_query_result(
                f'''
                    SELECT <{t1}><{t2}>1;
                ''',
                [1],
            )
    async def test_edgeql_casts_collections_01(self):
        """Casts apply element-wise to arrays and tuples, even nested."""
        await self.assert_query_result(
            r'''SELECT <array<str>>[1, 2, 3];''',
            [['1', '2', '3']],
        )
        await self.assert_query_result(
            r'''WITH X := [1, 2, 3] SELECT <array<str>> X;''',
            [['1', '2', '3']],
        )
        await self.assert_query_result(
            r'''SELECT <tuple<str, float64>> (1, '2');''',
            [['1', 2.0]],
        )
        await self.assert_query_result(
            r'''WITH X := (1, '2') SELECT <tuple<str, float64>> X;''',
            [['1', 2.0]],
        )
        await self.assert_query_result(
            r'''SELECT <array<tuple<str, float64>>> [(1, '2')];''',
            [[['1', 2.0]]],
        )
        await self.assert_query_result(
            r'''WITH X := [(1, '2')]
                SELECT <array<tuple<str, float64>>> X;''',
            [[['1', 2.0]]],
        )
        await self.assert_query_result(
            r'''SELECT <tuple<array<float64>>> (['1'],);''',
            [[[1.0]]],
        )
    async def test_edgeql_casts_collections_02(self):
        """Collection casts still work with ``std`` aliased to another module."""
        await self.assert_query_result(
            R'''
                WITH
                    std AS MODULE math,
                    foo := (SELECT [1, 2, 3])
                SELECT <array<str>>foo;
            ''',
            [['1', '2', '3']],
        )
        await self.assert_query_result(
            R'''
                WITH
                    std AS MODULE math,
                    foo := (SELECT [<int32>1, <int32>2, <int32>3])
                SELECT <array<str>>foo;
            ''',
            [['1', '2', '3']],
        )
        await self.assert_query_result(
            R'''
                WITH
                    std AS MODULE math,
                    foo := (SELECT [(1,), (2,), (3,)])
                SELECT <array<tuple<str>>>foo;
            ''',
            [[['1'], ['2'], ['3']]],
        )
    # casting into an abstract scalar should be illegal
    async def test_edgeql_casts_illegal_01(self):
        """Casting into the generic ``anytype`` must be rejected."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r"cannot cast into generic.*'anytype'"):
            await self.con.execute("""
                SELECT <anytype>123;
            """)
    async def test_edgeql_casts_illegal_02(self):
        """Casting into the abstract ``anyscalar`` must be rejected."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r"cannot cast into generic.*anyscalar'"):
            await self.con.execute("""
                SELECT <anyscalar>123;
            """)
    async def test_edgeql_casts_illegal_03(self):
        """Casting into the abstract ``anyreal`` must be rejected."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r"cannot cast into generic.*anyreal'"):
            await self.con.execute("""
                SELECT <anyreal>123;
            """)
    async def test_edgeql_casts_illegal_04(self):
        """Casting into the abstract ``anyint`` must be rejected."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r"cannot cast into generic.*anyint'"):
            await self.con.execute("""
                SELECT <anyint>123;
            """)
    async def test_edgeql_casts_illegal_05(self):
        """Casting into the abstract ``anyfloat`` must be rejected."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r'cannot cast.*'):
            await self.con.execute("""
                SELECT <anyfloat>123;
            """)
    async def test_edgeql_casts_illegal_06(self):
        """Casting into the abstract ``sequence`` must be rejected."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r"cannot cast into generic.*sequence'"):
            await self.con.execute("""
                SELECT <sequence>123;
            """)
    async def test_edgeql_casts_illegal_07(self):
        """``anytype`` is rejected even as an array element type."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r"cannot cast into generic.*anytype'"):
            await self.con.execute("""
                SELECT <array<anytype>>[123];
            """)
    async def test_edgeql_casts_illegal_08(self):
        """``anytype`` is rejected even as a tuple element type."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError, r"cannot cast into generic.*'anytype'"):
            await self.con.execute("""
                SELECT <tuple<int64, anytype>>(123, 123);
            """)
    async def test_edgeql_casts_illegal_09(self):
        """Object-type casts are rejected with a hint to use IS instead."""
        async with self.assertRaisesRegexTx(
                edgedb.QueryError,
                r"cannot cast.*std::Object.*use.*IS schema::Object.*"):
            await self.con.execute("""
                SELECT <schema::Object>std::Object;
            """)
# NOTE: json is a special type as it has its own type system. A
# json value can be JSON array, object, boolean, number, string or
# null. All of these JSON types have their own semantics. Casting
# into json converts data into one of those specific JSON types.
# Any of the EdgeDB numeric types (derived from anyreal) are cast
# into JSON number, str is cast into JSON string, bool is cast
# into JSON bool. Other EdgeDB scalars (like datetime) are cast
# into JSON string that represents that value (similar to casting
# to str first). Thus json values also have some type information
# and when casting back to EdgeDB scalars this type information is
# used to determine the valid casts (e.g. it's illegal to cast a
# JSON string "true" into a bool).
#
# Casting to json is lossless (in the same way and for the same
# reason as casting into str).
    async def test_edgeql_casts_json_01(self):
        """Round-trip every scalar through json losslessly."""
        await self.assert_query_result(
            r'''SELECT <bool><json>True = True;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <bool><json>False = False;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <str><json>"Hello" = 'Hello';''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH U := uuid_generate_v1mc()
                SELECT <uuid><json>U = U;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <datetime><json>datetime_of_statement() =
                    datetime_of_statement();
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_datetime><json>cal::to_local_datetime(
                        '2018-05-07T20:01:22.306916') =
                    cal::to_local_datetime('2018-05-07T20:01:22.306916');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_date><json>cal::to_local_date('2018-05-07')
                    = cal::to_local_date('2018-05-07');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <cal::local_time><json>
                    cal::to_local_time('20:01:22.306916') =
                    cal::to_local_time('20:01:22.306916');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <duration><json>to_duration(hours:=20) =
                    to_duration(hours:=20);
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int16><json>to_int16('12345') = 12345;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <int32><json>to_int32('1234567890') = 1234567890;''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <int64><json>to_int64(
                    '1234567890123') = 1234567890123;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <float32><json>to_float32('2.5') = 2.5;''',
            [True],
        )
        await self.assert_query_result(
            r'''SELECT <float64><json>to_float64('2.5') = 2.5;''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <bigint><json>to_bigint(
                        '123456789123456789123456789')
                    = to_bigint(
                        '123456789123456789123456789');
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                SELECT <decimal><json>to_decimal(
                        '123456789123456789123456789.123456789123456789123456789')
                    = to_decimal(
                        '123456789123456789123456789.123456789123456789123456789');
            ''',
            [True],
        )
    async def test_edgeql_casts_json_02(self):
        """Round-trip stored object properties through json losslessly."""
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <bool><json>T.p_bool = T.p_bool;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <str><json>T.p_str = T.p_str;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <datetime><json>T.p_datetime = T.p_datetime;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <cal::local_datetime><json>T.p_local_datetime =
                    T.p_local_datetime;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <cal::local_date><json>T.p_local_date = T.p_local_date;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <cal::local_time><json>T.p_local_time = T.p_local_time;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <duration><json>T.p_duration = T.p_duration;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <int16><json>T.p_int16 = T.p_int16;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <int32><json>T.p_int32 = T.p_int32;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <int64><json>T.p_int64 = T.p_int64;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <float32><json>T.p_float32 = T.p_float32;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <float64><json>T.p_float64 = T.p_float64;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <bigint><json>T.p_bigint = T.p_bigint;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH T := (SELECT Test FILTER .p_str = 'Hello')
                SELECT <decimal><json>T.p_decimal = T.p_decimal;
            ''',
            [True],
        )
    async def test_edgeql_casts_json_03(self):
        """Stored JSON properties cast back equal the scalar originals.

        Compares each ``JSONTest.j_*`` property, cast to its scalar
        type, against the matching ``Test.p_*`` property.
        """
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <bool>J.j_bool = T.p_bool;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <str>J.j_str = T.p_str;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <datetime>J.j_datetime = T.p_datetime;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <cal::local_datetime>J.j_local_datetime =
                    T.p_local_datetime;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <cal::local_date>J.j_local_date = T.p_local_date;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <cal::local_time>J.j_local_time = T.p_local_time;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <duration>J.j_duration = T.p_duration;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <int16>J.j_int16 = T.p_int16;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <int32>J.j_int32 = T.p_int32;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <int64>J.j_int64 = T.p_int64;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <float32>J.j_float32 = T.p_float32;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <float64>J.j_float64 = T.p_float64;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <bigint>J.j_bigint = T.p_bigint;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH
                    T := (SELECT Test FILTER .p_str = 'Hello'),
                    J := (SELECT JSONTest FILTER .j_str = <json>'Hello')
                SELECT <decimal>J.j_decimal = T.p_decimal;
            ''',
            [True],
        )
    async def test_edgeql_casts_json_04(self):
        """Casting an object to json serializes its selected shape."""
        self.assertEqual(
            await self.con.query('''
                select <json>(
                    select schema::Type{name} filter .name = 'std::bool'
                )
            '''),
            edgedb.Set(('{"name": "std::bool"}',))
        )
async def test_edgeql_casts_json_05(self):
self.assertEqual(
await self.con.query(
'select <json>{(1, 2), (3, 4)}'),
['[1, 2]', '[3, 4]'])
self.assertEqual(
await self.con.query(
'select <json>{(a := 1, b := 2), (a := 3, b := 4)}'),
['{"a": 1, "b": 2}', '{"a": 3, "b": 4}'])
self.assertEqual(
await self.con.query(
'select <json>{[1, 2], [3, 4]}'),
['[1, 2]', '[3, 4]'])
self.assertEqual(
await self.con.query(
'select <json>{[(1, 2)], [(3, 4)]}'),
['[[1, 2]]', '[[3, 4]]'])
    async def test_edgeql_casts_json_06(self):
        """Same casts as json_05 but fetched through query_json."""
        self.assertEqual(
            await self.con.query_json(
                'select <json>{(1, 2), (3, 4)}'),
            '[[1, 2], [3, 4]]')
        self.assertEqual(
            await self.con.query_json(
                'select <json>{[1, 2], [3, 4]}'),
            '[[1, 2], [3, 4]]')
        self.assertEqual(
            await self.con.query_json(
                'select <json>{[(1, 2)], [(3, 4)]}'),
            '[[[1, 2]], [[3, 4]]]')
    async def test_edgeql_casts_json_07(self):
        """JSON string -> datetime casts accept the same forms as str casts."""
        # This is the same suite of tests as for str. The point is
        # that when it comes to casting into various date and time
        # types JSON strings and regular strings should behave
        # identically.
        #
        # Canonical date and time str representations must follow ISO
        # 8601. This test assumes that the server is configured to be
        # in UTC time zone.
        await self.assert_query_result(
            r'''
                WITH x := <json>'2018-05-07T20:01:22.306916+00:00'
                SELECT <json><datetime>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same datetime
            r'''
                WITH x := <json>{
                    '2018-05-07T15:01:22.306916-05:00',
                    '2018-05-07T15:01:22.306916-05',
                    '2018-05-07T20:01:22.306916Z',
                    '2018-05-07T20:01:22.306916+0000',
                    '2018-05-07T20:01:22.306916+00',
                    # the '-' and ':' separators may be omitted
                    '20180507T200122.306916+00',
                    # acceptable RFC 3339
                    '2018-05-07 20:01:22.306916+00:00',
                    '2018-05-07t20:01:22.306916z',
                }
                SELECT <datetime>x =
                    <datetime><json>'2018-05-07T20:01:22.306916+00:00';
            ''',
            [True, True, True, True, True, True, True, True],
        )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime><json>"2018-05-07;20:01:22.306916+00:00"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime><json>"2018-05-07T20:01:22.306916"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime><json>"2018-05-07T20:01:22.306916 1000"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                '''SELECT <datetime><json>
                    "2018-05-07T20:01:22.306916 US/Central"
                ''')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax'):
            await self.con.query_single(
                'SELECT <datetime><json>"2018-05-07T20:01:22.306916 +GMT1"')
    async def test_edgeql_casts_json_08(self):
        """JSON string -> cal::local_datetime casts mirror the str casts."""
        # This is the same suite of tests as for str. The point is
        # that when it comes to casting into various date and time
        # types JSON strings and regular strings should behave
        # identically.
        #
        # Canonical date and time str representations must follow ISO
        # 8601. This test assumes that the server is configured to be
        # in UTC time zone.
        await self.assert_query_result(
            r'''
                WITH x := <json>'2018-05-07T20:01:22.306916'
                SELECT <json><cal::local_datetime>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same datetime
            r'''
                WITH x := <json>{
                    # the '-' and ':' separators may be omitted
                    '20180507T200122.306916',
                    # acceptable RFC 3339
                    '2018-05-07 20:01:22.306916',
                    '2018-05-07t20:01:22.306916',
                }
                SELECT <cal::local_datetime>x =
                    <cal::local_datetime><json>'2018-05-07T20:01:22.306916';
            ''',
            [True, True, True],
        )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                '''SELECT
                    <cal::local_datetime><json>"2018-05-07;20:01:22.306916"
                ''')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                '''SELECT <cal::local_datetime><json>
                    "2018-05-07T20:01:22.306916+01:00"
                ''')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                '''SELECT <cal::local_datetime><json>
                    "2018-05-07T20:01:22.306916 GMT"''')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                '''SELECT <cal::local_datetime><json>
                    "2018-05-07T20:01:22.306916 GMT0"''')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                '''SELECT <cal::local_datetime><json>
                    "2018-05-07T20:01:22.306916 US/Central"
                ''')
    async def test_edgeql_casts_json_09(self):
        """JSON string -> cal::local_date casts mirror the str casts."""
        # This is the same suite of tests as for str. The point is
        # that when it comes to casting into various date and time
        # types JSON strings and regular strings should behave
        # identically.
        #
        # Canonical date and time str representations must follow ISO
        # 8601.
        await self.assert_query_result(
            r'''
                WITH x := <json>'2018-05-07'
                SELECT <json><cal::local_date>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            # validating that these are all in fact the same date
            r'''
                # the '-' separators may be omitted
                WITH x := <json>'20180507'
                SELECT
                    <cal::local_date>x = <cal::local_date><json>'2018-05-07';
            ''',
            [True],
        )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_date><json>"2018-05-07T20:01:22.306916"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_date><json>"2018/05/07"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_date><json>"2018.05.07"')
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_date><json>"2018-05-07+01:00"')
    async def test_edgeql_casts_json_10(self):
        """JSON string -> cal::local_time casts mirror the str casts."""
        # This is the same suite of tests as for str. The point is
        # that when it comes to casting into various date and time
        # types JSON strings and regular strings should behave
        # identically.
        #
        # Canonical date and time str representations must follow ISO
        # 8601.
        await self.assert_query_result(
            r'''
                WITH x := <json>'20:01:22.306916'
                SELECT <json><cal::local_time>x = x;
            ''',
            [True],
        )
        await self.assert_query_result(
            r'''
                WITH x := <json>{
                    '20:01',
                    '20:01:00',
                    # the ':' separators may be omitted
                    '2001',
                    '200100',
                }
                SELECT <cal::local_time>x = <cal::local_time>'20:01:00';
            ''',
            [True, True, True, True],
        )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                'invalid input syntax for type cal::local_time'):
            await self.con.query_single(
                "SELECT <cal::local_time><json>'2018-05-07 20:01:22'")
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid input syntax for type'):
            await self.con.query_single(
                'SELECT <cal::local_time><json>"20:01:22.306916+01:00"')
    async def test_edgeql_casts_json_11(self):
        """<array<int64>> casts from JSON: success, bad element types, nulls."""
        await self.assert_query_result(
            r"SELECT <array<int64>><json>[1, 1, 2, 3, 5]",
            [[1, 1, 2, 3, 5]]
        )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'expected JSON number or null; got JSON string'):
            await self.con.query_single(
                r"SELECT <array<int64>><json>['asdf']")
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'expected JSON number or null; got JSON string'):
            await self.con.query_single(
                r"SELECT <array<int64>>to_json('[1, 2, \"asdf\"]')")
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid null value in cast'):
            await self.con.query_single(
                r"SELECT <array<int64>>[to_json('1'), to_json('null')]")
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid null value in cast'):
            await self.con.query_single(
                r"SELECT <array<int64>>to_json('[1, 2, null]')")
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid null value in cast'):
            await self.con.query_single(
                r"SELECT <array<int64>><array<json>>to_json('[1, 2, null]')")
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'cannot extract elements from a scalar'):
            await self.con.query_single(
                r"SELECT <array<int64>><json>'asdf'")
    async def test_edgeql_casts_json_12(self):
        """Tuple casts from JSON: named/unnamed, nesting, missing/null fields."""
        self.assertEqual(
            await self.con.query(
                r"""
                    SELECT <tuple<a: int64, b: int64>>
                    to_json('{"a": 1, "b": 2}')
                """
            ),
            [edgedb.NamedTuple(a=1, b=2)],
        )
        await self.assert_query_result(
            r"""
                SELECT <tuple<a: int64, b: int64>>
                to_json({'{"a": 3000, "b": -1}', '{"a": 1, "b": 12}'});
            """,
            [{"a": 3000, "b": -1}, {"a": 1, "b": 12}],
        )
        await self.assert_query_result(
            r"""
                SELECT <tuple<int64, int64>>
                to_json({'[3000, -1]', '[1, 12]'})
            """,
            [[3000, -1], [1, 12]],
        )
        self.assertEqual(
            await self.con.query(
                r"""
                    SELECT <tuple<int64, int64>>
                    to_json({'[3000, -1]', '[1, 12]'})
                """
            ),
            [(3000, -1), (1, 12)],
        )
        self.assertEqual(
            await self.con.query(
                r"""
                    SELECT <tuple<json, json>>
                    to_json({'[3000, -1]', '[1, 12]'})
                """
            ),
            [('3000', '-1'), ('1', '12')],
        )
        self.assertEqual(
            await self.con.query(
                r"""
                    SELECT <tuple<json, json>>
                    to_json({'[3000, -1]', '[1, null]'})
                """
            ),
            [('3000', '-1'), ('1', 'null')],
        )
        self.assertEqual(
            await self.con.query_single(
                r"""
                    SELECT <tuple<int64, tuple<a: int64, b: int64>>>
                    to_json('[3000, {"a": 1, "b": 2}]')
                """
            ),
            (3000, edgedb.NamedTuple(a=1, b=2))
        )
        self.assertEqual(
            await self.con.query_single(
                r"""
                    SELECT <tuple<int64, array<tuple<a: int64, b: str>>>>
                    to_json('[3000, [{"a": 1, "b": "foo"},
                                     {"a": 12, "b": "bar"}]]')
                """
            ),
            (3000,
             [edgedb.NamedTuple(a=1, b="foo"),
              edgedb.NamedTuple(a=12, b="bar")])
        )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'expected JSON number or null; got JSON string'):
            await self.con.query(
                r"""
                    SELECT <tuple<a: int64, b: int64>>
                    to_json('{"a": 1, "b": "2"}')
                """
            )
        # This isn't really the best error message for this.
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid null value in cast'):
            await self.con.query(
                r"""SELECT <tuple<a: int64, b: int64>>to_json('{"a": 1}')"""
            )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid null value in cast'):
            await self.con.query(
                r"""SELECT <tuple<int64, int64>>to_json('[3000]')"""
            )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid null value in cast'):
            await self.con.query(
                r"""
                    SELECT <tuple<a: int64, b: int64>>
                    to_json('[3000, 1000]')
                """
            )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid null value in cast'):
            await self.con.query(
                r"""SELECT <tuple<a: int64, b: int64>> to_json('"test"')"""
            )
        async with self.assertRaisesRegexTx(
                edgedb.InvalidValueError,
                r'invalid null value in cast'):
            await self.con.query(
                r"""SELECT <tuple<json, json>> to_json('[3000]')"""
            )
    async def test_edgeql_casts_assignment_01(self):
        """int64 literals are assignment-cast into every numeric property."""
        async with self._run_and_rollback():
            await self.con.execute(r"""
                # int64 is assignment castable or implicitly castable
                # into any other numeric type
                INSERT ScalarTest {
                    p_int16 := 1,
                    p_int32 := 1,
                    p_int64 := 1,
                    p_float32 := 1,
                    p_float64 := 1,
                    p_bigint := 1,
                    p_decimal := 1,
                };
            """)
            await self.assert_query_result(
                r"""
                    SELECT ScalarTest {
                        p_int16,
                        p_int32,
                        p_int64,
                        p_float32,
                        p_float64,
                        p_bigint,
                        p_decimal,
                    };
                """,
                [{
                    'p_int16': 1,
                    'p_int32': 1,
                    'p_int64': 1,
                    'p_float32': 1,
                    'p_float64': 1,
                    'p_bigint': 1,
                    'p_decimal': 1,
                }],
            )
    async def test_edgeql_casts_assignment_02(self):
        """A float64 literal is assignment-castable to a float32 property."""
        async with self._run_and_rollback():
            await self.con.execute(r"""
                # float64 is assignment castable to float32
                INSERT ScalarTest {
                    p_float32 := 1.5,
                };
            """)
            await self.assert_query_result(
                r"""
                    SELECT ScalarTest {
                        p_float32,
                    };
                """,
                [{
                    'p_float32': 1.5,
                }],
            )
    async def test_edgeql_casts_assignment_03(self):
        """bigint/decimal are not assignment-castable into other numerics."""
        async with self._run_and_rollback():
            # in particular, bigint and decimal are not assignment-castable
            # into any other numeric type
            for typename in ['int16',
                             'int32',
                             'int64',
                             'float32',
                             'float64']:
                for numtype in {'bigint', 'decimal'}:
                    query = f'''
                        INSERT ScalarTest {{
                            p_{typename} := <{numtype}>3,
                            p_{numtype} := 1001,
                        }};
                    '''
                    async with self.assertRaisesRegexTx(
                            edgedb.QueryError,
                            r'invalid target for property',
                            msg=query):
                        await self.con.execute(query + f'''
                            # clean up, so other tests can proceed
                            DELETE (
                                SELECT ScalarTest
                                FILTER .p_{numtype} = 1001
                            );
                        ''')
    async def test_edgeql_casts_custom_scalar_01(self):
        """Casting into a constrained custom scalar enforces its constraint."""
        await self.assert_query_result(
            '''
                SELECT <custom_str_t>'ABC'
            ''',
            ['ABC']
        )
        async with self.assertRaisesRegexTx(
                edgedb.ConstraintViolationError,
                'invalid custom_str_t'):
            await self.con.query(
                "SELECT <custom_str_t>'123'")
    async def test_edgeql_casts_prohibit_tuple_query_params_01(self):
        """Tuple-typed query parameters are rejected with QueryError."""
        async with self.assertRaisesRegexTx(
            edgedb.QueryError,
            r'cannot pass tuples as query parameters',
        ):
            await self.con.query(
                r'''
                    SELECT Test {
                        id,
                        num := (<tuple<int64, float64, str, bytes>>$tup).0,
                        st := (<tuple<int64, float64, str, bytes>>$tup).2,
                    };
                ''',
                tup=(0, 1.0, "str", b"bytes"),
            )
    async def test_edgeql_casts_prohibit_tuple_query_params_02(self):
        """Collections with tuple elements are also rejected as parameters."""
        async with self.assertRaisesRegexTx(
            edgedb.QueryError,
            r'cannot pass collections with tuple elements'
            r' as query parameters',
        ):
            await self.con.query(
                r"SELECT <array<tuple<int64, str>>>$0;",
                [(0, 'zero'), (1, 'one')],
            )
    async def test_edgeql_cast_empty_set_to_array_01(self):
        """Casting an empty set to array<Object> yields an empty result set."""
        await self.assert_query_result(
            r'''
                SELECT <array<Object>>{};
            ''',
            [],
        )
| edgedb/edgedb | tests/test_edgeql_casts.py | Python | apache-2.0 | 82,780 |
__source__ = 'https://leetcode.com/problems/is-subsequence/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/is-subsequence.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 392. Is Subsequence
#
# Given a string s and a string t, check if s is subsequence of t.
#
# You may assume that there is only lower case English letters in both s and t.
# t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
#
# A subsequence of a string is a new string which is formed from
# the original string by deleting some (can be none) of the characters
# without disturbing the relative positions of the remaining characters.
# (ie, "ace" is a subsequence of "abcde" while "aec" is not).
#
# Example 1:
# s = "abc", t = "ahbgdc"
#
# Return true.
#
# Example 2:
# s = "axc", t = "ahbgdc"
#
# Return false.
#
# Follow up:
# If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B,
# and you want to check one by one to see if T has its subsequence.
# In this scenario, how would you change your code?
#
# Companies
# Pinterest
# Related Topics
# Binary Search Dynamic Programming Greedy
#
import unittest
# Greedy solution.
# 128ms 75.77%
class Solution(object):
    """Check whether s is a subsequence of t (chars of s appear in order)."""

    def isSubsequence(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool
        """
        # Greedy scan: advance a cursor through s each time t yields the
        # character the cursor is waiting for.
        target = len(s)
        if target == 0:
            return True
        cursor = 0
        for ch in t:
            if ch == s[cursor]:
                cursor += 1
                if cursor == target:
                    return True
        return False

    def isSubsequence2(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool
        """
        # Single pass over t: each `in` test consumes the iterator up to
        # (and including) the matching character, preserving order.
        stream = iter(t)
        for ch in s:
            if ch not in stream:
                return False
        return True
class TestMethods(unittest.TestCase):
    """Minimal smoke test so the module can be executed with unittest."""

    def test_Local(self):
        expected = 1
        self.assertEqual(expected, 1)
if __name__ == '__main__':
    # Run the smoke tests when this module is executed directly.
    unittest.main()
# Reference Java solutions and profiling notes, kept verbatim as a
# module-level string constant (data only -- never executed).
Java = '''
# Thought:
# two pointers:
# 17ms 77.84%
class Solution {
    public boolean isSubsequence(String s, String t) {
        if (s.length() == 0) return true;
        int indexS = 0, indexT = 0;
        while (indexT < t.length()) {
            if (t.charAt(indexT) == s.charAt(indexS)) {
                indexS++;
                if (indexS == s.length()) return true;
            }
            indexT++;
        }
        return false;
    }
}
# 1ms 100%
class Solution {
    public boolean isSubsequence(String s, String t) {
        if (s.length() == 0) return true;
        int prev = t.indexOf(s.charAt(0));
        if (prev == -1) return false;
        for (int i = 1; i < s.length(); i++) {
            prev = t.indexOf(s.charAt(i), prev + 1);
            if (prev == -1) return false;
        }
        return true;
    }
}
# 1ms 100%
class Solution {
    public boolean isSubsequence(String s, String t) {
        int[] a = new int[s.length() + 1];
        a[0] = -1;
        for (int i = 0; i < s.length(); i++) {
            int index = t.indexOf(s.charAt(i), a[i] + 1);
            if (index == -1) {
                return false;
            }
            a[i + 1] = index;
        }
        return true;
    }
}
# quick examples fro java Collections.binarySearch(list, key)
# http://www.geeksforgeeks.org/collections-binarysearch-java-examples/
// Returns index of key in sorted list sorted in
// ascending order
public static int binarySearch(List slist, T key)
// Returns index of key in sorted list sorted in
// order defined by Comparator c.
public static int binarySearch(List slist, T key, Comparator c)
If key is not present, the it returns "(-(insertion point) - 1)".
The insertion point is defined as the point at which the key
would be inserted into the list.
public static void main(String[] args)
    {
        List al = new ArrayList();
        al.add(1);
        al.add(2);
        al.add(3);
        al.add(10);
        al.add(20);
        // 10 is present at index 3.
        int index = Collections.binarySearch(al, 10);
        System.out.println(index);
        // 13 is not present. 13 would have been inserted
        // at position 4. So the function returns (-4-1)
        // which is -5.
        index = Collections.binarySearch(al, 15);
        System.out.println(index);
    }
Binary search solution for follow-up with detailed comments
Re: Java binary search using TreeSet got TLE
I think the Map and TreeSet could be simplified by Array and binarySearch.
Since we scan T from beginning to the end (index itself is in increasing order),
List will be sufficient. Then we can use binarySearch to replace with TreeSet
ability which is a little overkill for this problem. Here is my solution.
// Follow-up: O(N) time for pre-processing, O(Mlog?) for each S.
// Eg-1. s="abc", t="bahbgdca"
// idx=[a={1,7}, b={0,3}, c={6}]
// i=0 ('a'): prev=1
// i=1 ('b'): prev=3
// i=2 ('c'): prev=6 (return true)
// Eg-2. s="abc", t="bahgdcb"
// idx=[a={1}, b={0,6}, c={5}]
// i=0 ('a'): prev=1
// i=1 ('b'): prev=6
// i=2 ('c'): prev=? (return false)
# 49ms 18.85%
class Solution {
    public boolean isSubsequence(String s, String t) {
        List<Integer>[] idx = new List[256]; // Just for clarity
        for (int i = 0; i < t.length(); i++) {
            if (idx[t.charAt(i)] == null)
                idx[t.charAt(i)] = new ArrayList<>();
            idx[t.charAt(i)].add(i);
        }
        int prev = 0;
        for (int i = 0; i < s.length(); i++) {
            if (idx[s.charAt(i)] == null) return false; // Note: char of S does NOT exist in T causing NPE
            int j = Collections.binarySearch(idx[s.charAt(i)], prev);
            if (j < 0) j = -j - 1;
            if (j == idx[s.charAt(i)].size()) return false;
            prev = idx[s.charAt(i)].get(j) + 1;
        }
        return true;
    }
}
''' | JulyKikuAkita/PythonPrac | cs15211/IsSubsequence.py | Python | apache-2.0 | 5,737 |
from dirbalak import graph
from dirbalak import repomirrorcache
from dirbalak import describetime
from upseto import gitwrapper
class DependencyGraph:
_CONTINUOUS_INTEGRATION_VIOLATION_TIME = 14 * 24 * 60 * 60
def __init__(self, dependencies, getNodeAttributesCallback):
self._dependencies = dependencies
self._getNodeAttributesCallback = getNodeAttributesCallback
self._cachedGraph = None
def renderText(self):
return "\n".join([str(d) for d in self._dependencies]) + \
"\n\n" + self.makeGraph().renderAsTreeText()
def makeGraph(self):
if self._cachedGraph is None:
self._cachedGraph = self._makeGraph()
return self._cachedGraph
def _makeGraph(self):
graphInstance = graph.Graph(dict(ranksep=0.7))
for dep in self._dependencies:
self._addNodeToGraph(graphInstance, dep.gitURL)
if dep.requiringURL is not None:
if dep.requiringURLHash != 'origin/master':
continue
self._addNodeToGraph(graphInstance, dep.requiringURL)
self._addArcToGraph(graphInstance, dep)
return graphInstance
def _lineStyleFromDependencyType(self, type):
if type == 'upseto':
return 'solid'
elif type == 'solvent':
return 'dashed'
elif type == 'dirbalak_build_rootfs':
return 'dotted'
else:
raise AssertionError("Unknown type %s" % type)
def _addArcToGraph(self, graphInstance, dep):
basename = gitwrapper.originURLBasename(dep.gitURL)
mirror = repomirrorcache.get(dep.gitURL)
distance = mirror.distanceFromMaster(dep.hash)
requiringBasename = gitwrapper.originURLBasename(dep.requiringURL)
graphInstance.addArc(
requiringBasename, basename, style=self._lineStyleFromDependencyType(dep.type),
** self._attributesFromDistanceFromMaster(distance))
def _addNodeToGraph(self, graphInstance, gitURL):
basename = gitwrapper.originURLBasename(gitURL)
attributes = self._getNodeAttributesCallback(gitURL)
attributes['label'] = basename
graphInstance.setNodeAttributes(basename, **attributes)
def _attributesFromDistanceFromMaster(self, distance):
if distance is None:
return {}
else:
if distance['broken']:
return dict(color="orange", label="broken")
else:
label = "behind:\\n%d commits" % distance['commits']
color = "#000000"
if 'time' in distance:
label += "\\n%s" % describetime.describeTime(distance['time'])
if distance['time'] > self._CONTINUOUS_INTEGRATION_VIOLATION_TIME:
color = "#FF0000"
else:
color = "#990000"
return dict(color=color, label=label)
| Stratoscale/dirbalak | py/dirbalak/dependencygraph.py | Python | apache-2.0 | 2,973 |
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
import webob
from nova.api.openstack import api_version_request as avr
from nova.api.openstack.compute import server_groups as sg_v21
from nova import context
from nova import exception
from nova import objects
from nova.policies import server_groups as sg_policies
from nova import test
from nova.tests import fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import policy_fixture
class AttrDict(dict):
    """Dict whose keys are also readable as attributes (test convenience)."""
    def __getattr__(self, name):
        return self.__getitem__(name)
def server_group_template(**kwargs):
    """Return a server-group request body dict, defaulting name to 'test'."""
    sgroup = dict(kwargs)
    if 'name' not in sgroup:
        sgroup['name'] = 'test'
    return sgroup
def server_group_resp_template(**kwargs):
    """Return the expected API response dict for a server group.

    Defaults: name='test', members=[]; 'policies' defaults to [] only
    when the caller did not pass the 2.64-style 'policy' key.
    """
    defaults = {'name': 'test', 'members': []}
    if 'policy' not in kwargs:
        defaults['policies'] = []
    resp = dict(defaults)
    resp.update(kwargs)
    return resp
def server_group_db(sg):
    """Convert a server-group API template into a DB-layer AttrDict.

    Renames 'id' -> 'uuid', guarantees list-valued 'policies'/'members'
    columns, drops 'policies' when the 2.64-style 'policy' key is present,
    and fills soft-delete/timestamp columns plus default user/project ids.
    """
    attrs = copy.deepcopy(sg)
    if 'id' in attrs:
        attrs['uuid'] = attrs.pop('id')
    # The original popped the value and immediately re-assigned it;
    # setdefault expresses the same "ensure a list exists" intent directly.
    attrs.setdefault('policies', [])
    # 'policy' (microversion 2.64+) supersedes 'policies'.
    if 'policy' in attrs:
        del attrs['policies']
    attrs.setdefault('members', [])
    attrs['deleted'] = 0
    attrs['deleted_at'] = None
    attrs['created_at'] = None
    attrs['updated_at'] = None
    if 'user_id' not in attrs:
        attrs['user_id'] = fakes.FAKE_USER_ID
    if 'project_id' not in attrs:
        attrs['project_id'] = fakes.FAKE_PROJECT_ID
    attrs['id'] = 7
    return AttrDict(attrs)
class ServerGroupTestV21(test.NoDBTestCase):
USES_DB_SELF = True
validation_error = exception.ValidationError
wsgi_api_version = '2.1'
    def setUp(self):
        """Build the controller, request fixtures, policy and two cell DBs."""
        super(ServerGroupTestV21, self).setUp()
        self._setup_controller()
        self.req = fakes.HTTPRequest.blank('')
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
        self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(fixtures.Database(database='api'))
        cells = fixtures.CellDatabases()
        cells.add_cell_database(uuidsentinel.cell1)
        cells.add_cell_database(uuidsentinel.cell2)
        self.useFixture(cells)
        ctxt = context.get_admin_context()
        self.cells = {}
        # Register a CellMapping per fake cell so instances can be
        # targeted into either cell during the tests.
        for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
            cm = objects.CellMapping(context=ctxt,
                                     uuid=uuid,
                                     database_connection=uuid,
                                     transport_url=uuid)
            cm.create()
            self.cells[cm.uuid] = cm
    def _setup_controller(self):
        """Create the v2.1 server groups controller under test."""
        self.controller = sg_v21.ServerGroupController()
    def test_create_server_group_with_no_policies(self):
        """Creating a group without 'policies' fails schema validation."""
        sgroup = server_group_template()
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
    def _create_server_group_normal(self, policies=None, policy=None,
                                    rules=None):
        """Create a group and verify name/id/policies in the response.

        NOTE(review): 'policy' and 'rules' are accepted but unused here;
        presumably consumed by microversion subclass overrides -- confirm.
        """
        sgroup = server_group_template()
        sgroup['policies'] = policies
        res_dict = self.controller.create(self.req,
                                          body={'server_group': sgroup})
        self.assertEqual(res_dict['server_group']['name'], 'test')
        self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
        self.assertEqual(res_dict['server_group']['policies'], policies)
    def test_create_server_group_with_new_policy_before_264(self):
        """'policy' and 'rules' request keys are rejected before 2.64."""
        req = fakes.HTTPRequest.blank('', version='2.63')
        policy = 'anti-affinity'
        rules = {'max_server_per_host': 3}
        # 'policy' isn't an acceptable request key before 2.64
        sgroup = server_group_template(policy=policy)
        result = self.assertRaises(
            self.validation_error, self.controller.create,
            req, body={'server_group': sgroup})
        self.assertIn(
            "Invalid input for field/attribute server_group",
            str(result)
        )
        # 'rules' isn't an acceptable request key before 2.64
        sgroup = server_group_template(rules=rules)
        result = self.assertRaises(
            self.validation_error, self.controller.create,
            req, body={'server_group': sgroup})
        self.assertIn(
            "Invalid input for field/attribute server_group",
            str(result)
        )
    def test_create_server_group(self):
        """Each supported policy value creates a group successfully."""
        policies = ['affinity', 'anti-affinity']
        for policy in policies:
            self._create_server_group_normal(policies=[policy])
    def test_create_server_group_rbac_default(self):
        """Default policy allows both admin and non-admin creation."""
        sgroup = server_group_template()
        sgroup['policies'] = ['affinity']
        # test as admin
        self.controller.create(self.admin_req, body={'server_group': sgroup})
        # test as non-admin
        self.controller.create(self.req, body={'server_group': sgroup})
    def test_create_server_group_rbac_admin_only(self):
        """With an admin-only rule, non-admin creation is rejected."""
        sgroup = server_group_template()
        sgroup['policies'] = ['affinity']
        # override policy to restrict to admin
        rule_name = sg_policies.POLICY_ROOT % 'create'
        rules = {rule_name: 'is_admin:True'}
        self.policy.set_rules(rules, overwrite=False)
        # check for success as admin
        self.controller.create(self.admin_req, body={'server_group': sgroup})
        # check for failure as non-admin
        exc = self.assertRaises(exception.PolicyNotAuthorized,
                                self.controller.create, self.req,
                                body={'server_group': sgroup})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
    def _create_instance(self, ctx, cell):
        """Create an instance plus its InstanceMapping in the given cell."""
        with context.target_cell(ctx, cell) as cctx:
            instance = objects.Instance(context=cctx,
                                        image_ref=uuidsentinel.fake_image_ref,
                                        node='node1', reservation_id='a',
                                        host='host1',
                                        project_id=fakes.FAKE_PROJECT_ID,
                                        vm_state='fake',
                                        system_metadata={'key': 'value'})
            instance.create()
            im = objects.InstanceMapping(context=ctx,
                                         project_id=ctx.project_id,
                                         user_id=ctx.user_id,
                                         cell_mapping=cell,
                                         instance_uuid=instance.uuid)
            im.create()
            return instance
    def _create_instance_group(self, context, members):
        """Create an InstanceGroup with the given member uuids; return its uuid."""
        ig = objects.InstanceGroup(context=context, name='fake_name',
                                   user_id='fake_user', project_id=fakes.FAKE_PROJECT_ID,
                                   members=members)
        ig.create()
        return ig.uuid
    def _create_groups_and_instances(self, ctx):
        """Create one group with members in cell1, cell2 and no cell at all."""
        cell1 = self.cells[uuidsentinel.cell1]
        cell2 = self.cells[uuidsentinel.cell2]
        instances = [self._create_instance(ctx, cell=cell1),
                     self._create_instance(ctx, cell=cell2),
                     self._create_instance(ctx, cell=None)]
        members = [instance.uuid for instance in instances]
        ig_uuid = self._create_instance_group(ctx, members)
        return (ig_uuid, instances, members)
    def _test_list_server_group_all(self, api_version='2.1'):
        """List groups across all projects without paging."""
        self._test_list_server_group(api_version=api_version,
                                     limited='',
                                     path='/os-server-groups?all_projects=True')
    def _test_list_server_group_offset_and_limit(self, api_version='2.1'):
        """List groups across all projects with offset/limit paging."""
        self._test_list_server_group(api_version=api_version,
                                     limited='&offset=1&limit=1',
                                     path='/os-server-groups?all_projects=True')
    @mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
    @mock.patch('nova.objects.InstanceGroupList.get_all')
    def _test_list_server_group(self, mock_get_all, mock_get_by_project,
                                path, api_version='2.1', limited=None):
        """List server groups as admin (all projects) and as non-admin.

        Builds the expected response shape per microversion (2.64+ uses
        'policy'/'rules'; 2.13+ adds project/user ids) and checks that the
        admin request sees both groups while the tenant request sees only
        its own (or the paged subset when `limited` is set).
        """
        policies = ['anti-affinity']
        policy = "anti-affinity"
        members = []
        metadata = {}  # always empty
        names = ['default-x', 'test']
        p_id = fakes.FAKE_PROJECT_ID
        u_id = fakes.FAKE_USER_ID
        ver = avr.APIVersionRequest(api_version)
        if ver >= avr.APIVersionRequest("2.64"):
            sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
                                             name=names[0],
                                             policy=policy,
                                             rules={},
                                             members=members,
                                             project_id=p_id,
                                             user_id=u_id)
            sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
                                             name=names[1],
                                             policy=policy,
                                             rules={},
                                             members=members,
                                             project_id=p_id,
                                             user_id=u_id)
        elif ver >= avr.APIVersionRequest("2.13"):
            sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
                                             name=names[0],
                                             policies=policies,
                                             members=members,
                                             metadata=metadata,
                                             project_id=p_id,
                                             user_id=u_id)
            sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
                                             name=names[1],
                                             policies=policies,
                                             members=members,
                                             metadata=metadata,
                                             project_id=p_id,
                                             user_id=u_id)
        else:
            sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
                                             name=names[0],
                                             policies=policies,
                                             members=members,
                                             metadata=metadata)
            sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
                                             name=names[1],
                                             policies=policies,
                                             members=members,
                                             metadata=metadata)
        tenant_groups = [sg2]
        all_groups = [sg1, sg2]
        if limited:
            all = {'server_groups': [sg2]}
            tenant_specific = {'server_groups': []}
        else:
            all = {'server_groups': all_groups}
            tenant_specific = {'server_groups': tenant_groups}
        def return_all_server_groups():
            return objects.InstanceGroupList(
                objects=[objects.InstanceGroup(
                    **server_group_db(sg)) for sg in all_groups])
        mock_get_all.return_value = return_all_server_groups()
        def return_tenant_server_groups():
            return objects.InstanceGroupList(
                objects=[objects.InstanceGroup(
                    **server_group_db(sg)) for sg in tenant_groups])
        mock_get_by_project.return_value = return_tenant_server_groups()
        path = path or '/os-server-groups?all_projects=True'
        if limited:
            path += limited
        req = fakes.HTTPRequest.blank(path, version=api_version)
        admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
                                            version=api_version)
        # test as admin
        res_dict = self.controller.index(admin_req)
        self.assertEqual(all, res_dict)
        # test as non-admin
        res_dict = self.controller.index(req)
        self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
def _test_list_server_group_by_tenant(self, mock_get_by_project,
api_version='2.1'):
policies = ['anti-affinity']
members = []
metadata = {} # always empty
names = ['default-x', 'test']
p_id = fakes.FAKE_PROJECT_ID
u_id = fakes.FAKE_USER_ID
if api_version >= '2.13':
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
else:
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata)
groups = [sg1, sg2]
expected = {'server_groups': groups}
def return_server_groups():
return objects.InstanceGroupList(
objects=[objects.InstanceGroup(
**server_group_db(sg)) for sg in groups])
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
req = fakes.HTTPRequest.blank(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
    def test_display_members(self):
        """show() returns every member instance uuid of the group."""
        ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
        (ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
        res_dict = self.controller.show(self.req, ig_uuid)
        result_members = res_dict['server_group']['members']
        self.assertEqual(3, len(result_members))
        for member in members:
            self.assertIn(member, result_members)
    def test_display_members_with_nonexistent_group(self):
        """show() of an unknown group uuid raises HTTP 404."""
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, self.req, uuidsentinel.group)
def test_display_active_members_only(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
im = objects.InstanceMapping.get_by_instance_uuid(ctx,
instances[1].uuid)
with context.target_cell(ctx, im.cell_mapping) as cctxt:
instances[1]._context = cctxt
instances[1].destroy()
# check that the instance does not exist
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
self.assertIn(instances[0].uuid, result_members)
def test_display_members_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
ig_uuid = self._create_groups_and_instances(ctx)[0]
# test as admin
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
self.controller.show(self.req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.foo_req, ig_uuid)
def test_display_members_rbac_admin_only(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
ig_uuid = self._create_groups_and_instances(ctx)[0]
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'show'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
self.controller.show(self.admin_req, ig_uuid)
# check for failure as non-admin
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, self.req, ig_uuid)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
    def test_create_server_group_with_non_alphanumeric_in_name(self):
        """Printable non-alphanumeric characters are valid in group names."""
        # The fix for bug #1434335 expanded the allowable character set
        # for server group names to include non-alphanumeric characters
        # if they are printable.
        sgroup = server_group_template(name='good* $%name',
                                       policies=['affinity'])
        res_dict = self.controller.create(self.req,
                                          body={'server_group': sgroup})
        self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
    def test_create_server_group_with_illegal_name(self):
        """Each invalid name shape must fail schema validation."""
        # blank name
        sgroup = server_group_template(name='', policies=['test_policy'])
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # name longer than the 255-character maximum (260 chars here)
        sgroup = server_group_template(name='1234567890' * 26,
                                       policies=['test_policy'])
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # non-string name
        sgroup = server_group_template(name=12, policies=['test_policy'])
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # name with leading spaces
        sgroup = server_group_template(name='  leading spaces',
                                       policies=['test_policy'])
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # name with trailing spaces
        sgroup = server_group_template(name='trailing space ',
                                       policies=['test_policy'])
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # name with all spaces
        sgroup = server_group_template(name='    ',
                                       policies=['test_policy'])
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # name with unprintable character
        sgroup = server_group_template(name='bad\x00name',
                                       policies=['test_policy'])
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # name with out of range char U0001F4A9
        sgroup = server_group_template(name=u"\U0001F4A9",
                                       policies=['affinity'])
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
    def test_create_server_group_with_illegal_policies(self):
        """Each invalid 'policies' value must fail schema validation."""
        # blank policy
        sgroup = server_group_template(name='fake-name', policies='')
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # policy as integer
        sgroup = server_group_template(name='fake-name', policies=7)
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # policy as string
        sgroup = server_group_template(name='fake-name', policies='invalid')
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
        # policy as None
        sgroup = server_group_template(name='fake-name', policies=None)
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
    def test_create_server_group_conflicting_policies(self):
        """affinity and anti-affinity cannot be combined in one group."""
        sgroup = server_group_template()
        policies = ['anti-affinity', 'affinity']
        sgroup['policies'] = policies
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
    def test_create_server_group_with_duplicate_policies(self):
        """Repeating the same policy twice is rejected."""
        sgroup = server_group_template()
        policies = ['affinity', 'affinity']
        sgroup['policies'] = policies
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
    def test_create_server_group_not_supported(self):
        """Unknown policy names are rejected by the schema."""
        sgroup = server_group_template()
        policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
        sgroup['policies'] = policies
        self.assertRaises(self.validation_error, self.controller.create,
                          self.req, body={'server_group': sgroup})
    def test_create_server_group_with_no_body(self):
        """A missing request body is a validation error."""
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req, body=None)
    def test_create_server_group_with_no_server_group(self):
        """A body without the 'server_group' key is a validation error."""
        body = {'no-instanceGroup': None}
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req, body=body)
    def test_list_server_group_by_tenant(self):
        """Tenant-scoped listing at this class's microversion."""
        self._test_list_server_group_by_tenant(
            api_version=self.wsgi_api_version)
    def test_list_server_group_all_v20(self):
        """all_projects listing on legacy v2.0."""
        self._test_list_server_group_all(api_version='2.0')
    def test_list_server_group_all(self):
        """all_projects listing at this class's microversion."""
        self._test_list_server_group_all(
            api_version=self.wsgi_api_version)
    def test_list_server_group_offset_and_limit(self):
        """Paging via offset/limit query parameters."""
        self._test_list_server_group_offset_and_limit(
            api_version=self.wsgi_api_version)
    def test_list_server_groups_rbac_default(self):
        """Default policy allows both admin and non-admin listing."""
        # test as admin
        self.controller.index(self.admin_req)
        # test as non-admin
        self.controller.index(self.req)
    def test_list_server_group_multiple_param(self):
        """Repeated query parameters: the last occurrence wins."""
        self._test_list_server_group(api_version=self.wsgi_api_version,
            limited='&offset=2&limit=2&limit=1&offset=1',
            path='/os-server-groups?all_projects=False&all_projects=True')
    def test_list_server_group_additional_param(self):
        """Unknown query parameters are ignored (pre-2.75 behaviour)."""
        self._test_list_server_group(api_version=self.wsgi_api_version,
            limited='&offset=1&limit=1',
            path='/os-server-groups?dummy=False&all_projects=True')
    def test_list_server_group_param_as_int(self):
        """all_projects accepts an integer flag value."""
        self._test_list_server_group(api_version=self.wsgi_api_version,
            limited='&offset=1&limit=1',
            path='/os-server-groups?all_projects=1')
    def test_list_server_group_negative_int_as_offset(self):
        """Negative offset is a validation error."""
        self.assertRaises(exception.ValidationError,
                          self._test_list_server_group,
                          api_version=self.wsgi_api_version,
                          limited='&offset=-1',
                          path='/os-server-groups?all_projects=1')
    def test_list_server_group_string_int_as_offset(self):
        """Non-numeric offset is a validation error."""
        self.assertRaises(exception.ValidationError,
                          self._test_list_server_group,
                          api_version=self.wsgi_api_version,
                          limited='&offset=dummy',
                          path='/os-server-groups?all_projects=1')
    def test_list_server_group_multiparam_string_as_offset(self):
        """A non-numeric offset fails even when repeated with a valid one."""
        self.assertRaises(exception.ValidationError,
                          self._test_list_server_group,
                          api_version=self.wsgi_api_version,
                          limited='&offset=dummy&offset=1',
                          path='/os-server-groups?all_projects=1')
    def test_list_server_group_negative_int_as_limit(self):
        """Negative limit is a validation error."""
        self.assertRaises(exception.ValidationError,
                          self._test_list_server_group,
                          api_version=self.wsgi_api_version,
                          limited='&limit=-1',
                          path='/os-server-groups?all_projects=1')
    def test_list_server_group_string_int_as_limit(self):
        """Non-numeric limit is a validation error."""
        self.assertRaises(exception.ValidationError,
                          self._test_list_server_group,
                          api_version=self.wsgi_api_version,
                          limited='&limit=dummy',
                          path='/os-server-groups?all_projects=1')
    def test_list_server_group_multiparam_string_as_limit(self):
        """A non-numeric limit fails even when repeated with a valid one."""
        self.assertRaises(exception.ValidationError,
                          self._test_list_server_group,
                          api_version=self.wsgi_api_version,
                          limited='&limit=dummy&limit=1',
                          path='/os-server-groups?all_projects=1')
    def test_list_server_groups_rbac_admin_only(self):
        """With an admin-only rule, non-admin listing is rejected."""
        # override policy to restrict to admin
        rule_name = sg_policies.POLICY_ROOT % 'index'
        rules = {rule_name: 'is_admin:True'}
        self.policy.set_rules(rules, overwrite=False)
        # check for success as admin
        self.controller.index(self.admin_req)
        # check for failure as non-admin
        exc = self.assertRaises(exception.PolicyNotAuthorized,
                                self.controller.index, self.req)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
    @mock.patch('nova.objects.InstanceGroup.destroy')
    def test_delete_server_group_by_id(self, mock_destroy):
        """Deleting a group destroys it and returns 204 No Content."""
        sg = server_group_template(id=uuidsentinel.sg1_id)
        def return_server_group(_cls, context, group_id):
            self.assertEqual(sg['id'], group_id)
            return objects.InstanceGroup(**server_group_db(sg))
        self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
                      return_server_group)
        resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
        mock_destroy.assert_called_once_with()
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller, sg_v21.ServerGroupController):
            status_int = self.controller.delete.wsgi_code
        else:
            status_int = resp.status_int
        self.assertEqual(204, status_int)
    def test_delete_non_existing_server_group(self):
        """Deleting an unknown group id yields 404."""
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          self.req, 'invalid')
    def test_delete_server_group_rbac_default(self):
        """Default policy allows both admin and owner to delete."""
        ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
        # test as admin
        ig_uuid = self._create_groups_and_instances(ctx)[0]
        self.controller.delete(self.admin_req, ig_uuid)
        # test as non-admin
        ig_uuid = self._create_groups_and_instances(ctx)[0]
        self.controller.delete(self.req, ig_uuid)
    def test_delete_server_group_rbac_admin_only(self):
        """With an admin-only rule, non-admin deletion is rejected."""
        ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
        # override policy to restrict to admin
        rule_name = sg_policies.POLICY_ROOT % 'delete'
        rules = {rule_name: 'is_admin:True'}
        self.policy.set_rules(rules, overwrite=False)
        # check for success as admin
        ig_uuid = self._create_groups_and_instances(ctx)[0]
        self.controller.delete(self.admin_req, ig_uuid)
        # check for failure as non-admin
        ig_uuid = self._create_groups_and_instances(ctx)[0]
        exc = self.assertRaises(exception.PolicyNotAuthorized,
                                self.controller.delete, self.req, ig_uuid)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
class ServerGroupTestV213(ServerGroupTestV21):
    """Re-runs the list tests against microversion 2.13 (adds user/project)."""
    wsgi_api_version = '2.13'
    def _setup_controller(self):
        # 2.13 is served by the same v2.1 controller; behaviour differs
        # only via the request's microversion.
        self.controller = sg_v21.ServerGroupController()
    def test_list_server_group_all(self):
        """all_projects listing includes 2.13 fields."""
        self._test_list_server_group_all(api_version='2.13')
    def test_list_server_group_offset_and_limit(self):
        """Paging still works with the 2.13 representation."""
        self._test_list_server_group_offset_and_limit(api_version='2.13')
    def test_list_server_group_by_tenant(self):
        """Tenant-scoped listing includes 2.13 fields."""
        self._test_list_server_group_by_tenant(api_version='2.13')
class ServerGroupTestV264(ServerGroupTestV213):
    """Tests for microversion 2.64: 'policies' list is replaced by a single
    'policy' string plus an optional 'rules' dict."""
    wsgi_api_version = '2.64'
    def _setup_controller(self):
        self.controller = sg_v21.ServerGroupController()
    def _create_server_group_normal(self, policies=None, policy=None,
                                    rules=None):
        """Create a group via the 2.64 schema and verify the echoed fields.

        Returns the new group's id.  ``policies`` is accepted for signature
        compatibility with older helpers but is not used here.
        """
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        sgroup = server_group_template()
        sgroup['rules'] = rules or {}
        sgroup['policy'] = policy
        res_dict = self.controller.create(req,
                                          body={'server_group': sgroup})
        self.assertEqual(res_dict['server_group']['name'], 'test')
        self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
        self.assertEqual(res_dict['server_group']['policy'], policy)
        self.assertEqual(res_dict['server_group']['rules'], rules or {})
        return res_dict['server_group']['id']
    def test_list_server_group_all(self):
        """all_projects listing with the 2.64 representation."""
        self._test_list_server_group_all(api_version=self.wsgi_api_version)
    def test_create_and_show_server_group(self):
        """Created groups round-trip policy and (empty) rules through show."""
        policies = ['affinity', 'anti-affinity']
        for policy in policies:
            g_uuid = self._create_server_group_normal(
                policy=policy)
            res_dict = self._display_server_group(g_uuid)
            self.assertEqual(res_dict['server_group']['policy'], policy)
            self.assertEqual(res_dict['server_group']['rules'], {})
    def _display_server_group(self, uuid):
        """Fetch a group via the controller at this microversion."""
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        group = self.controller.show(req, uuid)
        return group
    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=33)
    def test_create_and_show_server_group_with_rules(self, mock_get_v):
        """Rules are accepted once all computes are new enough (>= 33)."""
        policy = 'anti-affinity'
        rules = {'max_server_per_host': 3}
        g_uuid = self._create_server_group_normal(
            policy=policy, rules=rules)
        res_dict = self._display_server_group(g_uuid)
        self.assertEqual(res_dict['server_group']['policy'], policy)
        self.assertEqual(res_dict['server_group']['rules'], rules)
    def test_create_affinity_server_group_with_invalid_policy(self):
        """Rules are only valid together with the anti-affinity policy."""
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        sgroup = server_group_template(policy='affinity',
                                       rules={'max_server_per_host': 3})
        result = self.assertRaises(webob.exc.HTTPBadRequest,
            self.controller.create, req, body={'server_group': sgroup})
        self.assertIn("Only anti-affinity policy supports rules", str(result))
    def test_create_anti_affinity_server_group_with_invalid_rules(self):
        """Unknown keys and non-positive/non-integer values are rejected."""
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        # A negative test for key is unknown, the value is not positive
        # and not integer
        invalid_rules = [{'unknown_key': '3'},
                         {'max_server_per_host': 0},
                         {'max_server_per_host': 'foo'}]
        for r in invalid_rules:
            sgroup = server_group_template(policy='anti-affinity', rules=r)
            result = self.assertRaises(
                self.validation_error, self.controller.create,
                req, body={'server_group': sgroup})
            self.assertIn(
                "Invalid input for field/attribute", str(result)
            )
    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=32)
    def test_create_server_group_with_low_version_compute_service(self,
                                                                  mock_get_v):
        """Rules > 1 are refused while old computes (< 33) are present."""
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        sgroup = server_group_template(policy='anti-affinity',
                                       rules={'max_server_per_host': 3})
        result = self.assertRaises(
            webob.exc.HTTPConflict,
            self.controller.create, req, body={'server_group': sgroup})
        self.assertIn("Creating an anti-affinity group with rule "
                      "max_server_per_host > 1 is not yet supported.",
                      str(result))
    def test_create_server_group(self):
        """Both policies create successfully through the 2.64 schema."""
        policies = ['affinity', 'anti-affinity']
        for policy in policies:
            self._create_server_group_normal(policy=policy)
    def test_policies_since_264(self):
        """The legacy 'policies' key is rejected from 2.64 onwards."""
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        # 'policies' isn't allowed in request >= 2.64
        sgroup = server_group_template(policies=['anti-affinity'])
        self.assertRaises(
            self.validation_error, self.controller.create,
            req, body={'server_group': sgroup})
    def test_create_server_group_without_policy(self):
        """'policy' is mandatory from 2.64 onwards."""
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        # 'policy' is required request key in request >= 2.64
        sgroup = server_group_template()
        self.assertRaises(self.validation_error, self.controller.create,
                          req, body={'server_group': sgroup})
    def test_create_server_group_with_illegal_policies(self):
        """Each invalid 'policy' value must fail schema validation."""
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        # blank policy
        sgroup = server_group_template(policy='')
        self.assertRaises(self.validation_error, self.controller.create,
                          req, body={'server_group': sgroup})
        # policy as integer
        sgroup = server_group_template(policy=7)
        self.assertRaises(self.validation_error, self.controller.create,
                          req, body={'server_group': sgroup})
        # policy as string
        sgroup = server_group_template(policy='invalid')
        self.assertRaises(self.validation_error, self.controller.create,
                          req, body={'server_group': sgroup})
        # policy as None
        sgroup = server_group_template(policy=None)
        self.assertRaises(self.validation_error, self.controller.create,
                          req, body={'server_group': sgroup})
    def test_additional_params(self):
        """Unknown body keys are rejected by additionalProperties=False."""
        req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        sgroup = server_group_template(unknown='unknown')
        self.assertRaises(self.validation_error, self.controller.create,
                          req, body={'server_group': sgroup})
class ServerGroupTestV275(ServerGroupTestV264):
    """Tests for microversion 2.75, which rejects unknown query parameters."""
    wsgi_api_version = '2.75'
    def test_list_server_group_additional_param_old_version(self):
        """Pre-2.75 requests still silently ignore unknown query params."""
        self._test_list_server_group(api_version='2.74',
            limited='&offset=1&limit=1',
            path='/os-server-groups?dummy=False&all_projects=True')
    def test_list_server_group_additional_param(self):
        """From 2.75 an unknown query parameter is a validation error."""
        req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
                                      version=self.wsgi_api_version)
        self.assertRaises(self.validation_error, self.controller.index,
                          req)
| klmitch/nova | nova/tests/unit/api/openstack/compute/test_server_groups.py | Python | apache-2.0 | 37,885 |
#!/usr/bin/env python
"""
responses
=========
A utility library for mocking out the `requests` Python library.
:copyright: (c) 2013 Dropbox, Inc.
"""
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
setup_requires = []
if 'test' in sys.argv:
    # Only require pytest at setup time when the ``test`` command runs.
    setup_requires.append('pytest')
# Runtime dependencies of the ``responses`` module.
install_requires = [
    'requests',
    'mock',
    'six',
]
# Extra dependencies needed to run the test suite and linting.
tests_require = [
    'pytest',
    'pytest-cov',
    'flake8',
]
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Point pytest at the single test module and mark the suite present.
        self.test_args = ['test_responses.py']
        self.test_suite = True

    def run_tests(self):
        # Import lazily: pytest may only become importable after setuptools
        # has installed the test dependencies (eggs) for this run.
        import pytest
        sys.exit(pytest.main(self.test_args))
# Package metadata; the ``test`` command is wired to the PyTest class above.
setup(
    name='responses',
    version='0.2.2',
    author='David Cramer',
    description=(
        'A utility library for mocking out the `requests` Python library.'
    ),
    long_description=open('README.rst').read(),
    py_modules=['responses'],
    zip_safe=False,
    install_requires=install_requires,
    extras_require={
        'tests': tests_require,
    },
    tests_require=tests_require,
    setup_requires=setup_requires,
    cmdclass={'test': PyTest},
    include_package_data=True,
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development'
    ],
)
| cournape/responses | setup.py | Python | apache-2.0 | 1,534 |
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_win import exceptions as os_win_exc
from oslo_config import cfg
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute.unit.virt.hyperv import test_base
from jacket.compute.virt.hyperv import livemigrationops
# Module-level shortcut to the global oslo.config configuration object.
CONF = cfg.CONF
class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for the Hyper-V LiveMigrationOps class."""
    def setUp(self):
        super(LiveMigrationOpsTestCase, self).setUp()
        self.context = 'fake_context'
        self._livemigrops = livemigrationops.LiveMigrationOps()
        # Replace the real utils/path helpers so no Hyper-V calls happen.
        self._livemigrops._livemigrutils = mock.MagicMock()
        self._livemigrops._pathutils = mock.MagicMock()
    @mock.patch('compute.virt.hyperv.vmops.VMOps.copy_vm_console_logs')
    @mock.patch('compute.virt.hyperv.vmops.VMOps.copy_vm_dvd_disks')
    def _test_live_migration(self, mock_get_vm_dvd_paths,
                             mock_copy_logs, side_effect):
        """Drive live_migration; with a HyperVException side effect the
        recover callback must fire, otherwise the post callback must."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_post = mock.MagicMock()
        mock_recover = mock.MagicMock()
        fake_dest = mock.sentinel.DESTINATION
        self._livemigrops._livemigrutils.live_migrate_vm.side_effect = [
            side_effect]
        # NOTE(review): identity check against the exception *class*; the
        # mock raises it because a class in side_effect is raised by mock.
        if side_effect is os_win_exc.HyperVException:
            self.assertRaises(os_win_exc.HyperVException,
                              self._livemigrops.live_migration,
                              self.context, mock_instance, fake_dest,
                              mock_post, mock_recover, False, None)
            mock_recover.assert_called_once_with(self.context, mock_instance,
                                                 fake_dest, False)
        else:
            self._livemigrops.live_migration(context=self.context,
                                             instance_ref=mock_instance,
                                             dest=fake_dest,
                                             post_method=mock_post,
                                             recover_method=mock_recover)
            mock_copy_logs.assert_called_once_with(mock_instance.name,
                                                   fake_dest)
            mock_live_migr = self._livemigrops._livemigrutils.live_migrate_vm
            mock_live_migr.assert_called_once_with(mock_instance.name,
                                                   fake_dest)
            mock_post.assert_called_once_with(self.context, mock_instance,
                                              fake_dest, False)
    def test_live_migration(self):
        """Happy path: migration succeeds and post_method is invoked."""
        self._test_live_migration(side_effect=None)
    def test_live_migration_exception(self):
        """Failure path: recover_method runs and the error is re-raised."""
        self._test_live_migration(side_effect=os_win_exc.HyperVException)
    @mock.patch('compute.virt.hyperv.volumeops.VolumeOps'
                '.ebs_root_in_block_devices')
    @mock.patch('compute.virt.hyperv.imagecache.ImageCache.get_cached_image')
    @mock.patch('compute.virt.hyperv.volumeops.VolumeOps'
                '.initialize_volumes_connection')
    def test_pre_live_migration(self, mock_initialize_connection,
                                mock_get_cached_image,
                                mock_ebs_root_in_block_devices):
        """pre_live_migration checks config, caches the image and wires
        up volume connections for a CoW-image, non-boot-from-volume VM."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_instance.image_ref = "fake_image_ref"
        mock_ebs_root_in_block_devices.return_value = None
        CONF.set_override('use_cow_images', True)
        self._livemigrops.pre_live_migration(
            self.context, mock_instance,
            block_device_info=mock.sentinel.BLOCK_INFO,
            network_info=mock.sentinel.NET_INFO)
        check_config = (
            self._livemigrops._livemigrutils.check_live_migration_config)
        check_config.assert_called_once_with()
        mock_ebs_root_in_block_devices.assert_called_once_with(
            mock.sentinel.BLOCK_INFO)
        mock_get_cached_image.assert_called_once_with(self.context,
                                                      mock_instance)
        mock_initialize_connection.assert_called_once_with(
            mock.sentinel.BLOCK_INFO)
    @mock.patch('compute.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
    def test_post_live_migration(self, mock_disconnect_volumes):
        """post_live_migration disconnects volumes and removes the
        source-side instance directory."""
        self._livemigrops.post_live_migration(
            self.context, mock.sentinel.instance,
            mock.sentinel.block_device_info)
        mock_disconnect_volumes.assert_called_once_with(
            mock.sentinel.block_device_info)
        self._livemigrops._pathutils.get_instance_dir.assert_called_once_with(
            mock.sentinel.instance.name, create_dir=False, remove_dir=True)
    @mock.patch('compute.virt.hyperv.vmops.VMOps.log_vm_serial_output')
    def test_post_live_migration_at_destination(self, mock_log_vm):
        """Destination side restarts serial-console logging for the VM."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._livemigrops.post_live_migration_at_destination(
            self.context, mock_instance, network_info=mock.sentinel.NET_INFO,
            block_migration=mock.sentinel.BLOCK_INFO)
        mock_log_vm.assert_called_once_with(mock_instance.name,
                                            mock_instance.uuid)
| HybridF5/jacket | jacket/tests/compute/unit/virt/hyperv/test_livemigrationops.py | Python | apache-2.0 | 5,845 |
"""
Models initializer.
"""
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
from server import app
from utils import time_to_json
from errors import DataBaseException
# Global SQLAlchemy handle bound to the Flask app; models share its engine.
DB = SQLAlchemy(app)
def db_factory_func(func):
    """
    Database connection decorator. Creates a database connection, passes it
    to the wrapped function as the ``conn`` keyword argument and guarantees
    the connection is closed afterwards.

    Row results are converted to JSON-friendly dicts via ``time_to_json``;
    a ``None`` result is passed through unchanged.

    :param func: function to decorate.
    """
    @wraps(func)
    def wrapper(*args, **kw):
        """
        Wrapper function for database operations.
        """
        # Initialize before the try block: if connect() itself raises,
        # the finally clause must not hit an unbound local (the original
        # code raised NameError here, masking the real connection error).
        conn = None
        try:
            conn = DB.engine.connect()
            result = func(conn=conn, *args, **kw)
            if result is not None:
                return [time_to_json(dict(r)) for r in result]
            return result
        finally:
            if conn is not None:
                conn.close()
    return wrapper
class BaseModel:
    """
    Base class for the data manipulation and database operations.

    A subclass describes one table: ``table`` is the table name, ``fields``
    maps column names to their SQL type declarations, and ``primary_key``
    is an optional list of column names.

    NOTE(review): ``query`` and ``sort_by`` arguments are interpolated into
    SQL as-is and must never come from untrusted input; only the column
    *values* handed to create/update are parameterized.
    """

    def __init__(self, table, fields, primary_key=None, init_table=False):
        self.table_name = table
        self.fields = fields
        self.primary_key = primary_key
        if init_table:
            self.__init_table()

    @db_factory_func
    def __init_table(self, conn=None):
        """
        Checks if the table exists and, if it is not created yet, creates it.

        :param conn: Connection from the db_factory_func.
        """
        try:
            result = conn.execute("""
                SELECT table_name
                FROM information_schema.tables
                WHERE table_schema='public'
                AND table_type='BASE TABLE';
            """)
            existing = [dict(r).get("table_name") for r in result]
            if self.table_name not in existing:
                # Column declarations, e.g. "id SERIAL", "name TEXT".
                table_fields = ["{} {}".format(name, decl)
                                for name, decl in self.fields.items()]
                if self.primary_key:
                    table_fields.append(
                        "PRIMARY KEY (" + ", ".join(self.primary_key) + ")")
                init_query = "CREATE TABLE {} ( {} )".format(
                    self.table_name, ", ".join(table_fields))
                conn.execute(init_query)
        except Exception as sql_err:
            print(sql_err)

    @db_factory_func
    def create(self, conn=None, data=None):
        """
        Creates and inserts a database row. Keys of ``data`` must be valid
        column names; unknown keys raise.

        :param conn: Connection from the db_factory_func.
        :param data: Relevant data to insert.
        :raises DataBaseException: on empty/invalid data or SQL failure.
        """
        try:
            if not data:
                raise DataBaseException("data is empty.")
            # Values go through %(field)s placeholders, so they are
            # parameterized by the driver rather than interpolated.
            values = ["%({})s".format(field) for field in data
                      if self.fields.get(field) is not None]
            if not values:
                raise DataBaseException("data provided is not correct.")
            sql_statement = "INSERT INTO {} ({}) VALUES({})".format(
                self.table_name, ", ".join(data.keys()), ", ".join(values))
            try:
                conn.execute(sql_statement, data)
            except Exception as sql_err:
                print(sql_err)
                raise DataBaseException("sql query error.")
        except DataBaseException:
            raise

    @db_factory_func
    def find(self, conn=None, query="", limit=0, sort_by="", return_cols=None, offset=None):
        """
        Finds and retrieves data with the given query from the database.

        :param conn: Connection from the db_factory_func.
        :param query: where sql query string.
        :param limit: sql limit value for select (ignored when <= 0).
        :param sort_by: sql sort information.
        :param return_cols: array of fields to return.
        :param offset: sql offset value (ignored when falsy, including 0).
        :raises DataBaseException: on invalid columns or SQL failure.
        """
        try:
            selected_cols = "*"
            if return_cols:
                valid_cols = [col for col in return_cols if col in self.fields]
                for col in return_cols:
                    if col not in self.fields:
                        print("%s is not a valid column name!" % col)
                if not valid_cols:
                    raise DataBaseException("Invalid column selection.")
                selected_cols = ",".join(valid_cols)
            sql_statement = "SELECT {} FROM {} ".format(
                selected_cols, self.table_name)
            if query:
                sql_statement += " WHERE {} ".format(query)
            # ORDER BY must precede LIMIT/OFFSET in PostgreSQL; the
            # original appended it last, producing invalid SQL whenever
            # sort_by was combined with limit/offset.
            if sort_by:
                sql_statement += " ORDER BY {} ".format(sort_by)
            if limit > 0:
                sql_statement += " LIMIT {} ".format(limit)
            if offset:
                sql_statement += " OFFSET {} ".format(offset)
            try:
                return conn.execute(sql_statement)
            except Exception as sql_err:
                print(sql_err)
                raise DataBaseException("sql query error.")
        except DataBaseException:
            raise

    def find_one(self, query=""):
        """
        Finds one element from the database with a given query.

        :param query: where sql query string.
        """
        return self.find(query=query, limit=1)

    def find_by_id(self, _id):
        """
        Finds one element from the database with a given id.

        :param _id: element id number.
        """
        return self.find_one(query="id=%s" % _id)

    @db_factory_func
    def update(self, conn=None, data=None, query="", return_cols=None):
        """
        Finds and updates the rows matching the given query.

        :param conn: Connection from the db_factory_func.
        :param data: Relevant data to update.
        :param query: where sql query string.
        :param return_cols: array of fields to return.
        :raises DataBaseException: on empty data, invalid columns or SQL failure.
        """
        try:
            if not data:
                raise DataBaseException("No data to update.")
            sql_statement = "UPDATE {} SET ".format(self.table_name)
            assignments = ["{}=%({})s".format(field, field) for field in data
                           if self.fields.get(field) is not None]
            if not assignments:
                raise DataBaseException("No valid field is provided.")
            sql_statement += ", ".join(assignments)
            if query:
                sql_statement += " WHERE {} ".format(query)
            if return_cols:
                valid_cols = [col for col in return_cols if col in self.fields]
                if not valid_cols:
                    raise DataBaseException("Invalid column return!")
                sql_statement += " RETURNING {} ".format(", ".join(valid_cols))
            try:
                return conn.execute(sql_statement, data)
            except Exception as sql_err:
                print(sql_err)
                raise DataBaseException("sql query error.")
        except DataBaseException:
            raise

    def update_by_id(self, _id, data=None, return_cols=None):
        """
        Update a row with id.

        :param _id: element id number.
        :param data: Relevant data to update.
        :param return_cols: array of fields to return.
        """
        # NOTE(review): [0] assumes the id matched a row and that
        # return_cols was given -- otherwise this raises IndexError.
        return self.update(data=data, query=("id=%s" % _id), return_cols=return_cols)[0]

    @db_factory_func
    def delete(self, conn=None, query="", return_cols=None):
        """
        Deletes rows matching the given query.

        :param conn: Connection from the db_factory_func.
        :param query: where sql query string.
        :param return_cols: array of fields to return.
        :raises DataBaseException: on invalid return columns or SQL failure.
        """
        sql_statement = "DELETE FROM {} ".format(self.table_name)
        if query:
            sql_statement += " WHERE {} ".format(query)
        # Only validate RETURNING columns when the caller asked for some.
        # The original raised "Invalid field return!" for *every* call made
        # without return_cols because the else-branch was attached to the
        # wrong if.
        if return_cols:
            valid_cols = [col for col in return_cols if col in self.fields]
            if valid_cols:
                sql_statement += " RETURNING {} ".format(", ".join(valid_cols))
            else:
                raise DataBaseException("Invalid field return!")
        try:
            return conn.execute(sql_statement)
        except Exception as sql_err:
            print(sql_err)
            raise DataBaseException("sql query error.")

    def delete_by_id(self, _id, return_cols=None):
        """
        Deletes a row with the given id.

        :param _id: element id number.
        :param return_cols: array of fields to return.
        """
        # NOTE(review): [0] assumes a row was deleted and return_cols was
        # given -- otherwise this raises IndexError.
        return self.delete(query=("id=%s" % _id), return_cols=return_cols)[0]
| ITU-DB-MANAGEMENT-HM/itunder-backend-rest | models/base_model.py | Python | apache-2.0 | 9,264 |
# Copyright 2017 Josh Karlin. All rights reserved.
# Use of this source code is governed by the Apache license found in the LICENSE
# file.
import argparse
import datetime
import getopt
import json
import sys
import tempfile
import threading
import time
import urllib.request
import urllib.parse
# Global response cache; None until cacheResponses(True) installs a FileCache.
gFileCache = None;
class FileCache:
    """Key/value store keeping values in temporary files for ~30 minutes.

    A periodic (15 min) garbage-collection pass drops entries older than
    30 minutes.  ``get`` returns the cached bytes, or '' (empty string)
    for a miss -- callers rely on that falsy sentinel.
    """

    def __init__(self):
        self.store = {}
        # Guards self.store: put/get run on request threads while gc runs
        # on a timer thread.
        self.lock = threading.Lock()
        self._schedule_gc()

    def _schedule_gc(self):
        # Daemon timer so a pending GC never keeps the process alive
        # (the original non-daemon Timer blocked interpreter shutdown
        # for up to 15 minutes).
        timer = threading.Timer(15 * 60, self.gc)
        timer.daemon = True
        timer.start()

    def put(self, url, data):
        f = tempfile.TemporaryFile()
        f.write(data)
        with self.lock:
            old = self.store.get(url)
            self.store[url] = (f, datetime.datetime.now())
        if old is not None:
            # Close the replaced entry's file instead of leaking it.
            old[0].close()

    def get(self, url):
        with self.lock:
            entry = self.store.get(url)
        if entry is None:
            return ''
        f, _timestamp = entry
        f.seek(0)
        return f.read()

    def gc(self):
        self._schedule_gc()
        expired = datetime.datetime.now() - datetime.timedelta(minutes=30)
        with self.lock:
            stale = [url for url, (_f, ts) in self.store.items()
                     if ts < expired]
            for url in stale:
                f, _timestamp = self.store.pop(url)
                f.close()
def cacheResponses(should_cache):
    """Enable or disable the process-wide response cache.

    Disabling drops the existing cache; enabling is a no-op when a cache
    is already installed.
    """
    global gFileCache
    if should_cache:
        if gFileCache is None:
            gFileCache = FileCache()
    else:
        gFileCache = None
# Retrieve the url by first trying to cache and falling back to the network.
def retrieve(url):
global gFileCache
if gFileCache:
cached_response = gFileCache.get(url);
if (cached_response):
return cached_response.decode('utf8');
response = None
try:
if len(url) > 1500:
short_url = url.split('?')[0]
data = url.split('?')[1]
response = urllib.request.urlopen(short_url, data=data.encode('utf-8'), timeout=3)
else:
response = urllib.request.urlopen(url, timeout=3)
except error:
return ''
result = response.read()
if gFileCache:
gFileCache.put(url, result);
return result.decode('utf8');
def getSignatureFor(src_file, method):
    """Look up the codesearch signature of ``method`` inside ``src_file``.

    Fetches the annotation data for the file and scans its snippets for
    either a call-style xref signature ("method(") or an internal link
    that mentions the method/class.  Returns '' when nothing matches or
    the service is unreachable.
    """
    url = ('https://cs.chromium.org/codesearch/json'
           '?annotation_request=b'
           '&file_spec=b'
           '&package_name=chromium'
           '&name={file_name}'
           '&file_spec=e'
           '&type=b'
           '&id=1'
           '&type=e'
           '&label='
           '&follow_branches=false'
           '&annotation_request=e')
    url = url.format(file_name=urllib.parse.quote(src_file, safe=''))
    result = retrieve(url);
    if not result:
        return ''
    result = json.loads(result)['annotation_response'][0]
    for snippet in result.get('annotation', []):
        # Only typed snippets carry usable signature data.
        if not 'type' in snippet:
            continue
        if 'xref_signature' in snippet:
            signature = snippet['xref_signature']['signature']
            if '%s(' % method in signature:
                return signature
        elif 'internal_link' in snippet:
            signature = snippet['internal_link']['signature']
            # Match either a method reference or a class definition link.
            if '::%s' % method in signature or 'class-%s' % method in signature:
                return signature
    return ''
def getCallGraphFor(signature):
  # Fetch the callers of `signature` from the code-search call graph.
  # NOTE(review): returns {} when the request fails but a list otherwise --
  # callers must tolerate both; confirm intended.
  url = ('https://cs.chromium.org/codesearch/json'
         '?call_graph_request=b'
         '&signature={signature}'
         '&file_spec=b'
         '&package_name=chromium'
         '&name=.'
         '&file_spec=e'
         '&max_num_results=500'
         '&call_graph_request=e')
  url = url.format(signature=urllib.parse.quote(signature, safe=''))
  result = retrieve(url);
  if not result:
    return {}
  result = json.loads(result)['call_graph_response'][0];
  node = result['node'];
  callers = [];
  last_signature = ''
  if not 'children' in node:
    return callers
  for child in node['children']:
    # Skip consecutive duplicate results and entries without a file path.
    if child['signature'] == last_signature:
      continue
    if not 'snippet_file_path' in child:
      continue
    caller = {}
    caller['filename'] = child['snippet_file_path'];
    caller['line'] = child['call_site_range']['start_line']
    caller['col'] = child['call_site_range']['start_column']
    caller['text'] = child['snippet']['text']['text']
    caller['calling_method'] = child['identifier']
    caller['calling_signature'] = child['signature']
    last_signature = child['signature']
    caller['display_name'] = child['display_name']
    callers.append(caller)
  return callers
def getRefForMatch(filename, match):
  """Build a cross-reference record for one code-search match."""
  reference = {
      'filename': filename,
      'line': match['line_number'],
      'signature': match['signature'],
  }
  # The snippet text is optional in the search response.
  if 'line_text' in match:
    reference['line_text'] = match['line_text']
  return reference
def getXrefsFor(signature):
  # Fetch cross-references (definition/declaration/overrides/references)
  # for `signature`.  Returns {} on failure or when there are no results.
  url = ('https://cs.chromium.org/codesearch/json'
         '?xref_search_request=b'
         '&query={signature}'
         '&file_spec=b'
         '&name=.'
         '&package_name=chromium'
         '&file_spec=e'
         '&max_num_results=500'
         '&xref_search_request=e')
  url = url.format(signature=urllib.parse.quote(signature, safe=''))
  result = retrieve(url);
  if not result:
    return {}
  result = json.loads(result)['xref_search_response'][0]
  status = result['status']  # NOTE(review): unused -- confirm it can be dropped.
  if not 'search_result' in result:
    return {}
  search_results = result['search_result']
  xrefs = {}
  # Bucket each match by its relationship to the queried signature.
  for file_result in search_results:
    filename = file_result['file']['name']
    for match in file_result['match']:
      if match['type'] == 'HAS_DEFINITION':
        xrefs['definition'] = getRefForMatch(filename, match);
      elif match['type'] == 'HAS_DECLARATION':
        xrefs['declaration'] = getRefForMatch(filename, match);
      elif match['type'] == 'OVERRIDDEN_BY':
        xrefs.setdefault('overrides', []);
        xrefs['overrides'].append(getRefForMatch(filename, match));
      elif match['type'] == 'REFERENCED_AT':
        xrefs.setdefault('references', []);
        xrefs['references'].append(getRefForMatch(filename, match));
  return xrefs
def logAndExit(msg):
  """Print `msg` to stdout and terminate the process with exit status 2."""
  print(msg)
  sys.exit(2)
if __name__ == "__main__":
  # Command-line entry point: resolve a signature (given directly via -s, or
  # looked up from -p/-w) and print its xrefs and call graph as JSON.
  parser = argparse.ArgumentParser(description='Searches Chromium Code Search for X-Refs.')
  parser.add_argument('-p', '--path',
                      help='The path to this file starting with src/')
  parser.add_argument('-w', '--word',
                      help='The word to search for in the file denoted by the path argument. You must also specify -p')
  parser.add_argument('-s', '--signature',
                      help='A signature provided from a previous search. No -p or -w arguments required.')
  args = parser.parse_args()
  signature = args.signature;
  results = {}
  if not signature:
    # XOR: exactly one of path/word supplied is an error.
    # NOTE(review): supplying neither falls through and getSignatureFor(None,
    # None) will fail later; consider rejecting that case too -- confirm.
    if bool(args.path) ^ bool(args.word):
      print("Both path and word must be supplied if one is supplied");
      sys.exit(2);
    signature = getSignatureFor(args.path, args.word);
    results['signature'] = signature
    if not signature:
      logAndExit("Could not find signature for %s" % (args.word))
  results['xrefs'] = getXrefsFor(signature);
  results['callers'] = getCallGraphFor(signature);
  print(json.dumps(results))
| karlinjf/ChromiumXRefs | lib/chromium_code_search.py | Python | apache-2.0 | 7,067 |
'''/* UVa problem:
*
* Topic:
*
* Level:
*
* Brief problem description:
*
*
*
* Solution Summary:
*
*
*
* Used Resources:
*
*
*
* I hereby certify that I have produced the following solution myself
* using the resources listed above in accordance with the CMPUT 403
* collaboration policy.
*
* --- Dennis Truong
*/'''
pyg = 'ay'
original = raw_input('Enter a word:')
if len(original) > 0 and original.isalpha():
word = original.lower()
first = word[0]
if first == ('a' or 'e' or 'i' or 'o' or 'u'):
new_word = word + pyg
print new_word
else:
new_word = word[1:] + first + pyg
print new_word
else:
print 'empty'
| DT9/programming-problems | 2017/uva/403/temp/UVa492.py | Python | apache-2.0 | 703 |
from qgl2.qgl2 import qgl2decl, qgl2main, qreg
from qgl2.qgl2 import QRegister
from qgl2.qgl1 import X, Y, Z, Id, Utheta
from itertools import product
@qgl2decl
def cond_helper(q: qreg, cond):
    # Emit an X gate on `q` only when the classical condition is truthy;
    # otherwise this helper contributes no operations to the program.
    if cond:
        X(q)
@qgl2decl
def t1():
    """
    Correct result is [ X(q1) ]
    """
    q1 = QRegister('q1')
    # cond is False, so the helper emits nothing; only the X below remains.
    cond_helper(q1, False)
    X(q1)
@qgl2decl
def t2():
    """
    Correct result is [ X(q1) ]
    """
    q1 = QRegister('q1')
    # q2 is declared but deliberately never used.
    q2 = QRegister('q2')
    # We're not going to reference q2 anywhere,
    # just to make sure that the compiler doesn't
    # freak out
    X(q1)
@qgl2decl
def t3():
    """
    Like t2, but with a function call
    """
    q1 = QRegister('q1')
    q2 = QRegister('q2')
    # cond is True, so this emits X(q1); q2 stays intentionally unused.
    cond_helper(q1, True)
@qgl2decl
def t4():
    """
    Like t3, but the function call does nothing
    """
    q1 = QRegister('q1')
    q2 = QRegister('q2')
    # cond is False, so only the explicit X(q1) below is emitted.
    cond_helper(q1, False)
    X(q1) # need to do something
@qgl2decl
def t5():
    """
    Like t4, but with no operations at all (empty program body).
    """
    q1 = QRegister('q1')
    q2 = QRegister('q2')
    # don't do anything at all
| BBN-Q/pyqgl2 | test/code/bugs/84.py | Python | apache-2.0 | 1,119 |
#!/usr/bin/env python3
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Standard catkin boilerplate: mirror the package metadata into a distutils
# setup() call so the ROS build tooling can install the Python package.
d = generate_distutils_setup(
    packages=['lg_builder'],
    package_dir={'': 'src'},
    scripts=['scripts/lg-ros-build'],
    install_requires=['catkin_pkg', 'python-debian', 'rospkg']
)
setup(**d)
| EndPointCorp/lg_ros_nodes | lg_builder/setup.py | Python | apache-2.0 | 322 |
#kpbochenek@gmail.com
def common_words(first, second):
    """Return the words of `second` that also occur in `first`.

    Both arguments are comma-separated word lists.  Matching words are
    returned sorted alphabetically and joined with commas.  Duplicates in
    `second` are intentionally kept each time they match (set intersection
    would drop them).
    """
    # Build the membership set directly instead of a manual append loop.
    known = set(first.split(","))
    return ",".join(sorted(w for w in second.split(",") if w in known))
if __name__ == '__main__':
    # These asserts are only for self-checking; they are not needed for
    # the auto-tester.
    assert common_words("hello,world", "hello,earth") == "hello", "Hello"
    assert common_words("one,two,three", "four,five,six") == "", "Too different"
    assert common_words("one,two,three", "four,five,one,two,six,three") == "one,three,two", "1 2 3"
    print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
| kpbochenek/empireofcode | common_words.py | Python | apache-2.0 | 640 |
from fancontroller.fan_controller import Thermostat, STATE_ON, STATE_OFF

# Re-export the package's public API explicitly.  The previous version used
# bare-name expression statements (no-ops) to silence unused-import linters;
# `__all__` is the idiomatic way to declare re-exports and also controls
# `from fancontroller import *`.
__all__ = ['Thermostat', 'STATE_ON', 'STATE_OFF']
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2014 Hewlett-Packard Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Access Control Lists (ACL's) control access the API server."""
import pecan
from ceilometer.openstack.common import policy
_ENFORCER = None
def enforce(policy_name, request):
    """Check authorization of the request against a named policy rule.

    Aborts the request with HTTP 403 when the rule exists and denies the
    caller; requests naming unregistered rules pass through unchanged.
    (The previous docstring was copy-pasted from get_limited_to and wrongly
    claimed a return value.)

    :param request: HTTP request
    :param policy_name: the policy name to validate authz against.
    """
    global _ENFORCER
    if not _ENFORCER:
        _ENFORCER = policy.Enforcer()
        _ENFORCER.load_rules()

    rule_method = "telemetry:" + policy_name
    headers = request.headers

    policy_dict = dict()
    policy_dict['roles'] = headers.get('X-Roles', "").split(",")
    policy_dict['target.user_id'] = (headers.get('X-User-Id'))
    policy_dict['target.project_id'] = (headers.get('X-Project-Id'))

    # Direct membership test replaces the original O(n) scan over all
    # registered rule names; behavior is otherwise identical (at most one
    # rule can match since rule names are unique keys).
    if rule_method in _ENFORCER.rules:
        if not _ENFORCER.enforce(rule_method, {}, policy_dict):
            pecan.core.abort(status_code=403,
                             detail='RBAC Authorization Failed')
# TODO(fabiog): these methods are still used because the scoping part is really
# convoluted and difficult to separate out.


def get_limited_to(headers):
    """Return the user and project the request should be limited to.

    :param headers: HTTP headers dictionary
    :return: A tuple of (user, project), set to None if there's no limit on
    one of these.
    """
    global _ENFORCER
    if not _ENFORCER:
        _ENFORCER = policy.Enforcer()
        _ENFORCER.load_rules()
    policy_dict = dict()
    policy_dict['roles'] = headers.get('X-Roles', "").split(",")
    policy_dict['target.user_id'] = (headers.get('X-User-Id'))
    policy_dict['target.project_id'] = (headers.get('X-Project-Id'))
    # The 'segregation' rule grants cross-tenant visibility; when it does
    # NOT pass, scope results to the caller's own user/project.
    if not _ENFORCER.enforce('segregation',
                             {},
                             policy_dict):
        return headers.get('X-User-Id'), headers.get('X-Project-Id')
    return None, None
def get_limited_to_project(headers):
    """Return the project the request should be limited to.

    :param headers: HTTP headers dictionary
    :return: A project, or None if there's no limit on it.
    """
    _user, project = get_limited_to(headers)
    return project
| Juniper/ceilometer | ceilometer/api/rbac.py | Python | apache-2.0 | 2,916 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from paddle.utils import gast
from .logging_utils import warn
from .utils import is_paddle_api, is_dygraph_api, is_numpy_api, index_in_list, ast_to_source_code
__all__ = ['AstNodeWrapper', 'NodeVarType', 'StaticAnalysisVisitor']
class NodeVarType(object):
    """
    Enum class of python variable types. We have to know some variable types
    during compile time to transfer AST. For example, a string variable and a
    tensor variable in if clause may lead to different conversion from dygraph
    to static graph.
    """
    ERROR = -1  # Returns when static analysis gets error
    UNKNOWN = 0  # Reserve for AST nodes have not known the type
    STATEMENT = 1  # For nodes representing statement (non-variable type)
    CALLABLE = 2

    # python data types
    NONE = 100
    BOOLEAN = 101
    INT = 102
    FLOAT = 103
    STRING = 104
    TENSOR = 105
    NUMPY_NDARRAY = 106

    # python collections
    LIST = 200
    SET = 201
    DICT = 202

    PADDLE_DYGRAPH_API = 300
    PADDLE_CONTROL_IF = 301
    PADDLE_CONTROL_WHILE = 302
    PADDLE_CONTROL_FOR = 303
    # Paddle API may not be visible to get source code.
    # We use this enum value to denote the type return by a Paddle API
    PADDLE_RETURN_TYPES = 304

    # If node.node_var_type in TENSOR_TYPES, it can be considered as tensor-dependent.
    TENSOR_TYPES = {TENSOR, PADDLE_RETURN_TYPES}

    # Maps annotation source text (e.g. "int", "paddle.Tensor") to enum values.
    Annotation_map = {
        "Tensor": TENSOR,
        "paddle.Tensor": TENSOR,
        "int": INT,
        "float": FLOAT,
        "bool": BOOLEAN,
        "str": STRING
    }

    @staticmethod
    def binary_op_output_type(in_type1, in_type2):
        # Type "promotion" for binary operators: among the supported types
        # the larger enum value wins (e.g. INT op FLOAT -> FLOAT).
        if in_type1 == in_type2:
            return in_type1
        if in_type1 == NodeVarType.UNKNOWN:
            return in_type2
        if in_type2 == NodeVarType.UNKNOWN:
            return in_type1
        supported_types = [
            NodeVarType.BOOLEAN, NodeVarType.INT, NodeVarType.FLOAT,
            NodeVarType.NUMPY_NDARRAY, NodeVarType.TENSOR,
            NodeVarType.PADDLE_RETURN_TYPES
        ]
        if in_type1 not in supported_types:
            return NodeVarType.UNKNOWN
        if in_type2 not in supported_types:
            return NodeVarType.UNKNOWN
        # Mixing ndarray and Tensor operands has no well-defined result type.
        forbidden_types = [NodeVarType.NUMPY_NDARRAY, NodeVarType.TENSOR]
        if in_type1 in forbidden_types and in_type2 in forbidden_types:
            return NodeVarType.UNKNOWN
        return max(in_type1, in_type2)

    @staticmethod
    def type_from_annotation(annotation):
        # Resolve a gast annotation node to an enum value via its source text.
        annotation_str = ast_to_source_code(annotation).strip()
        if annotation_str in NodeVarType.Annotation_map:
            return NodeVarType.Annotation_map[annotation_str]
        # raise warning if not found
        warn("Currently we don't support annotation: %s" % annotation_str)
        return NodeVarType.UNKNOWN
class AstNodeWrapper(object):
    """
    Wrapper around a python gast node.

    A bare gast node does not carry all the bookkeeping the dygraph-to-static
    transformation needs, so every node is wrapped together with its tree
    links (parent/children) and the set of variable types inferred for it.
    """

    def __init__(self, node):
        # The wrapped gast AST node.
        self.node = node
        # Tree links, populated by StaticAnalysisVisitor during traversal.
        self.children = []
        self.parent = None
        # Inferred NodeVarType values; starts out unknown.
        self.node_var_type = {NodeVarType.UNKNOWN}
class AstVarScope(object):
    """
    AstVarScope is a class holding the map from current scope variable to its
    type.
    """
    SCOPE_TYPE_SCRIPT = 0
    SCOPE_TYPE_FUNCTION = 1
    SCOPE_TYPE_CLASS = 2

    def __init__(self,
                 scope_name='',
                 scope_type=SCOPE_TYPE_SCRIPT,
                 parent_scope=None):
        self.sub_scopes = []
        # Two-level mapping: name -> numeric id -> set of NodeVarType values.
        self.name_to_id = {}
        self.id_to_type = {}
        self.cur_id = 0
        self.scope_name = scope_name
        self.scope_type = scope_type
        self.parent_scope = parent_scope
        if parent_scope is not None:
            parent_scope.sub_scopes.append(self)

    def add_var_type(self, var_name, node_var_type):
        # Merge semantics: extend the existing type set in place rather than
        # replacing it (unless the name is still entirely unknown).
        var_type = self.get_var_type(var_name)
        if var_type == {NodeVarType.UNKNOWN}:
            self.set_var_type(var_name, node_var_type)
        else:
            if isinstance(node_var_type, set):
                var_type.update(node_var_type)
            else:
                var_type.add(node_var_type)

    def set_var_type(self, var_name, node_var_type):
        # Overwrite semantics: (re)assign the name's type set in this scope.
        if var_name in self.name_to_id:
            num_id = self.name_to_id[var_name]
        else:
            num_id = self.cur_id
            self.cur_id += 1
            self.name_to_id[var_name] = num_id
        self.id_to_type[num_id] = node_var_type if isinstance(
            node_var_type, set) else {node_var_type}

    def get_var_type(self, var_name):
        # Resolve through the enclosing scope chain; UNKNOWN if absent.
        if var_name in self.name_to_id:
            num_id = self.name_to_id[var_name]
            return self.id_to_type[num_id]
        if self.parent_scope is None:
            return {NodeVarType.UNKNOWN}
        return self.parent_scope.get_var_type(var_name)
class AstVarEnv(object):
    """
    A class maintains scopes and mapping from name strings to type.
    """

    def __init__(self):
        # Root scope is a module-level (script) scope.
        self.cur_scope = AstVarScope()

    def enter_scope(self, scope_name, scope_type):
        self.cur_scope = AstVarScope(
            scope_name, scope_type, parent_scope=self.cur_scope)
        return self.cur_scope

    def exit_scope(self):
        assert self.cur_scope.parent_scope is not None, "Call exit_scope in "\
            "AstVarEnv when current scope doesn't have parent scope."
        self.cur_scope = self.cur_scope.parent_scope
        return self.cur_scope

    def get_parent_scope(self):
        assert self.cur_scope.parent_scope is not None, "Call parent_scope in "\
            "AstVarEnv when current scope doesn't have parent scope."
        return self.cur_scope.parent_scope

    # Thin delegations to the current scope.
    def add_var_type(self, var_name, node_var_type):
        self.cur_scope.add_var_type(var_name, node_var_type)

    def set_var_type(self, var_name, node_var_type):
        self.cur_scope.set_var_type(var_name, node_var_type)

    def get_var_type(self, var_name):
        return self.cur_scope.get_var_type(var_name)

    def get_scope_var_type(self):
        '''
        Returns a dict mapping from variable name to type. Used for debug and
        test.
        '''
        cur_scope_dict = {}
        for name in self.cur_scope.name_to_id:
            node_var_type = self.cur_scope.get_var_type(name)
            cur_scope_dict[name] = node_var_type
        return cur_scope_dict
class StaticAnalysisVisitor(object):
    """
    A class that does static analysis
    """

    def __init__(self, ast_root=None):
        if ast_root is not None:
            self.run(ast_root)

    def run(self, ast_root):
        # (Re)initialize all traversal state, then walk the whole tree.
        self.node_wrapper_root = None
        self.ancestor_wrappers = []
        self.node_to_wrapper_map = {}
        self.var_env = AstVarEnv()
        self.dfs_visit(ast_root)

    def dfs_visit(self, node):
        # Depth-first post-order walk: children are typed before the parent so
        # _get_node_var_type can rely on child wrappers being populated.
        # AST reuses some gast.nodes, such as Param node of expr_context
        if node not in self.node_to_wrapper_map:
            cur_wrapper = AstNodeWrapper(node)
            self.node_to_wrapper_map[node] = cur_wrapper
        else:
            cur_wrapper = self.node_to_wrapper_map[node]
        if self.node_wrapper_root is None:
            self.node_wrapper_root = cur_wrapper
        if len(self.ancestor_wrappers) != 0:
            last_wrapper = self.ancestor_wrappers[-1]
            last_wrapper.children.append(cur_wrapper)
            cur_wrapper.parent = last_wrapper
        self.ancestor_wrappers.append(cur_wrapper)
        for child in gast.iter_child_nodes(node):
            if isinstance(child, gast.FunctionDef) or isinstance(
                    child, gast.AsyncFunctionDef):
                # TODO: current version is function name mapping to its type
                # consider complex case involving parameters
                self.var_env.enter_scope(child.name,
                                         AstVarScope.SCOPE_TYPE_FUNCTION)
                # NOTE(review): func_type is unused -- confirm it can be dropped.
                func_type = self.dfs_visit(child)
                self.var_env.exit_scope()
            else:
                self.dfs_visit(child)
        self.ancestor_wrappers.pop()
        cur_wrapper.node_var_type = self._get_node_var_type(cur_wrapper)
        return cur_wrapper.node_var_type

    def get_node_wrapper_root(self):
        return self.node_wrapper_root

    def get_node_to_wrapper_map(self):
        return self.node_to_wrapper_map

    def get_var_env(self):
        return self.var_env

    def is_tensor_node(self, node):
        # True when the node's inferred type intersects the tensor types.
        # NOTE(review): falls through and returns None (not False) when there
        # is no intersection -- truthiness-compatible, but confirm intended.
        tensor_types = {NodeVarType.TENSOR, NodeVarType.PADDLE_RETURN_TYPES}
        node_wrapper = self.node_to_wrapper_map.get(node, None)
        if node_wrapper is None:
            return False
        if node_wrapper.node_var_type & tensor_types:
            return True

    def _get_constant_node_type(self, node):
        # Map a literal's Python value to its NodeVarType; bool is checked
        # before int because bool is a subclass of int.
        assert isinstance(node, gast.Constant), \
            "Type of input node should be gast.Constant, but received %s" % type(node)
        # singleton: None, True or False
        if node.value is None:
            return {NodeVarType.NONE}
        if isinstance(node.value, bool):
            return {NodeVarType.BOOLEAN}
        if isinstance(node.value, int):
            return {NodeVarType.INT}
        if isinstance(node.value, float):
            return {NodeVarType.FLOAT}
        if isinstance(node.value, str):
            return {NodeVarType.STRING}
        return {NodeVarType.UNKNOWN}

    def _get_node_var_type(self, cur_wrapper):
        # Core inference: dispatch on the gast node kind and combine the
        # already-computed child types; defaults to STATEMENT.
        node = cur_wrapper.node
        if isinstance(node, gast.Constant):
            return self._get_constant_node_type(node)
        if isinstance(node, gast.BoolOp):
            return {NodeVarType.BOOLEAN}
        if isinstance(node, gast.Compare):
            return {NodeVarType.BOOLEAN}
        if isinstance(node, gast.Dict):
            return {NodeVarType.DICT}
        if isinstance(node, gast.Set):
            return {NodeVarType.SET}
        if isinstance(node, gast.UnaryOp):
            return self.node_to_wrapper_map[node.operand].node_var_type
        if isinstance(node, gast.BinOp):
            # Cross-product of operand type sets through the promotion rule.
            left_type = self.node_to_wrapper_map[node.left].node_var_type
            right_type = self.node_to_wrapper_map[node.right].node_var_type
            result_type = set()
            for l in left_type:
                for r in right_type:
                    result_type.add(NodeVarType.binary_op_output_type(l, r))
            return result_type
        if isinstance(node, gast.Assign):
            # Propagate the RHS type to every simple-name target.
            ret_type = self.node_to_wrapper_map[node.value].node_var_type
            for target in node.targets:
                if isinstance(target, gast.Name):
                    self.node_to_wrapper_map[target].node_var_type = ret_type
                    self.var_env.set_var_type(target.id, ret_type)
            return ret_type
        if isinstance(node, gast.AnnAssign):
            # TODO(0x45f): To determine whether need to support assignment statements
            # like `self.x: float = 2.1`.
            ret_type = {NodeVarType.type_from_annotation(node.annotation)}
            # if annotation and value(Constant) are diffent type, we use value type
            if node.value:
                ret_type = self.node_to_wrapper_map[node.value].node_var_type
            if isinstance(node.target, gast.Name):
                self.node_to_wrapper_map[node.target].node_var_type = ret_type
                self.var_env.set_var_type(node.target.id, ret_type)
            return ret_type
        if isinstance(node, gast.Name):
            if node.id == "None":
                return {NodeVarType.NONE}
            if node.id in {"True", "False"}:
                return {NodeVarType.BOOLEAN}
            # If node is child of functionDef.arguments
            parent_node_wrapper = cur_wrapper.parent
            if parent_node_wrapper and isinstance(parent_node_wrapper.node,
                                                  gast.arguments):
                return self._get_func_argument_type(parent_node_wrapper, node)
            return self.var_env.get_var_type(node.id)
        if isinstance(node, gast.Return):
            # If return nothing:
            if node.value is None:
                return {NodeVarType.NONE}
            return_type = self.node_to_wrapper_map[node.value].node_var_type
            assert self.var_env.cur_scope.scope_type == AstVarScope.SCOPE_TYPE_FUNCTION, "Return at non-function scope"
            # Record the enclosing function's return type under its name in
            # the parent scope so call sites can resolve it.
            func_name = self.var_env.cur_scope.scope_name
            parent_scope = self.var_env.get_parent_scope()
            parent_scope.add_var_type(func_name, return_type)
            return return_type
        if isinstance(node, gast.Call):
            if is_dygraph_api(node):
                if isinstance(node.func, gast.Attribute):
                    if node.func.attr == "to_variable":
                        return {NodeVarType.TENSOR}
            if is_paddle_api(node):
                return {NodeVarType.PADDLE_RETURN_TYPES}
            if is_numpy_api(node):
                # In this simple version we assume numpy api returns nd-array
                return {NodeVarType.NUMPY_NDARRAY}
            if isinstance(node.func, gast.Name):
                return self.var_env.get_var_type(node.func.id)
        if isinstance(node, gast.Subscript):
            if self.is_tensor_node(node.value):
                return {NodeVarType.TENSOR}
        return {NodeVarType.STATEMENT}

    def _get_func_argument_type(self, parent_node_wrapper, node):
        """
        Returns type information by parsing annotation or default values.

        For example:
            1. parse by default values.
                foo(x, y=1, z='s') -> x: UNKNOWN, y: INT, z: STR
            2. parse by Py3 type annotation.
                foo(x: Tensor, y: int, z: str) -> x: Tensor, y: INT, z: STR
            3. parse by type annotation and default values.
                foo(x: Tensor, y: int, z: str = 'abc') -> x: Tensor, y: INT, z: STR

        NOTE: Currently, we only support Tensor, int, bool, float, str et.al.
              Other complicate types will be supported later.
        """
        assert isinstance(node, gast.Name)
        parent_node = parent_node_wrapper.node
        var_type = {NodeVarType.UNKNOWN}
        if node.annotation is not None:
            var_type = {NodeVarType.type_from_annotation(node.annotation)}
            self.var_env.set_var_type(node.id, var_type)
        # if annotation and value(Constant) are diffent type, we use value type
        if parent_node.defaults:
            # Defaults align with the trailing arguments, hence the negative
            # index arithmetic below.
            index = index_in_list(parent_node.args, node)
            args_len = len(parent_node.args)
            if index != -1 and args_len - index <= len(parent_node.defaults):
                defaults_node = parent_node.defaults[index - args_len]
                if isinstance(defaults_node, gast.Constant):
                    var_type = self._get_constant_node_type(defaults_node)
                    # Add node with identified type into cur_env.
                    self.var_env.set_var_type(node.id, var_type)
        return var_type
| luotao1/Paddle | python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py | Python | apache-2.0 | 15,858 |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, hierarchical distributed counter."""
import threading
import time
from typing import Dict, Mapping, Optional, Union
from acme import core
# Counter values may be integral or floating point.
Number = Union[int, float]
class Counter(core.Saveable):
  """A simple counter object that can periodically sync with a parent."""

  def __init__(self,
               parent: Optional['Counter'] = None,
               prefix: str = '',
               time_delta: float = 1.0,
               return_only_prefixed: bool = False):
    """Initialize the counter.

    Args:
      parent: a Counter object to cache locally (or None for no caching).
      prefix: string prefix to use for all local counts.
      time_delta: time difference in seconds between syncing with the parent
        counter.
      return_only_prefixed: if True, and if `prefix` isn't empty, return counts
        restricted to the given `prefix` on each call to `increment` and
        `get_counts`. The `prefix` is stripped from returned count names.
    """
    self._parent = parent
    self._prefix = prefix
    self._time_delta = time_delta
    # Hold local counts and we'll lock around that.
    # These are counts to be synced to the parent and the cache.
    self._counts = {}
    self._lock = threading.Lock()
    # We'll sync periodically (when the last sync was more than self._time_delta
    # seconds ago.)
    self._cache = {}
    self._last_sync_time = 0.0
    self._return_only_prefixed = return_only_prefixed

  def increment(self, **counts: Number) -> Dict[str, Number]:
    """Increment a set of counters.

    Args:
      **counts: keyword arguments specifying count increments.

    Returns:
      The [name, value] mapping of all counters stored, i.e. this will also
      include counts that were not updated by this call to increment.
    """
    with self._lock:
      for key, value in counts.items():
        self._counts.setdefault(key, 0)
        self._counts[key] += value
    return self.get_counts()

  def get_counts(self) -> Dict[str, Number]:
    """Return all counts tracked by this counter."""
    now = time.time()
    # TODO(b/144421838): use futures instead of blocking.
    if self._parent and (now - self._last_sync_time) > self._time_delta:
      with self._lock:
        counts = _prefix_keys(self._counts, self._prefix)
        # Reset the local counts, as they will be merged into the parent and the
        # cache.
        self._counts = {}
        self._cache = self._parent.increment(**counts)
        self._last_sync_time = now
    # Potentially prefix the keys in the counts dictionary.
    # NOTE(review): self._counts is read here without holding the lock --
    # presumably relying on the GIL for dict-read atomicity; confirm.
    counts = _prefix_keys(self._counts, self._prefix)
    # If there's no prefix make a copy of the dictionary so we don't modify the
    # internal self._counts.
    if not self._prefix:
      counts = dict(counts)
    # Combine local counts with any parent counts.
    for key, value in self._cache.items():
      counts[key] = counts.get(key, 0) + value
    if self._prefix and self._return_only_prefixed:
      # Keep only this counter's own entries and strip "<prefix>_".
      counts = dict([(key[len(self._prefix) + 1:], value)
                     for key, value in counts.items()
                     if key.startswith(f'{self._prefix}_')])
    return counts

  def save(self) -> Mapping[str, Mapping[str, Number]]:
    return {'counts': self._counts, 'cache': self._cache}

  def restore(self, state: Mapping[str, Mapping[str, Number]]):
    # Force a sync, if necessary, on the next get_counts call.
    self._last_sync_time = 0.
    self._counts = state['counts']
    self._cache = state['cache']
def _prefix_keys(dictionary: Dict[str, Number], prefix: str):
"""Return a dictionary with prefixed keys.
Args:
dictionary: dictionary to return a copy of.
prefix: string to use as the prefix.
Returns:
Return a copy of the given dictionary whose keys are replaced by
"{prefix}_{key}". If the prefix is the empty string it returns the given
dictionary unchanged.
"""
if prefix:
dictionary = {f'{prefix}_{k}': v for k, v in dictionary.items()}
return dictionary
| deepmind/acme | acme/utils/counting.py | Python | apache-2.0 | 4,636 |
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import common_db_mixin
from neutron.db import models_v2
LOG = logging.getLogger(__name__)
class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
"""Stores getters and helper methods for db_base_plugin_v2
All private getters and simple helpers like _make_*_dict were moved from
db_base_plugin_v2.
More complicated logic and public methods left in db_base_plugin_v2.
Main purpose of this class is to make getters accessible for Ipam
backends.
"""
    @staticmethod
    def _generate_mac():
        # Random MAC address derived from the configured base_mac prefix.
        return utils.get_random_mac(cfg.CONF.base_mac.split(':'))
    @staticmethod
    def _delete_ip_allocation(context, network_id, subnet_id, ip_address):
        # Delete the IP address from the IPAllocate table
        LOG.debug("Delete allocated IP %(ip_address)s "
                  "(%(network_id)s/%(subnet_id)s)",
                  {'ip_address': ip_address,
                   'network_id': network_id,
                   'subnet_id': subnet_id})
        # Matching zero rows is silently a no-op for filter().delete().
        context.session.query(models_v2.IPAllocation).filter_by(
            network_id=network_id,
            ip_address=ip_address,
            subnet_id=subnet_id).delete()
    @staticmethod
    def _store_ip_allocation(context, ip_address, network_id, subnet_id,
                             port_id):
        # Record that `ip_address` on `subnet_id` is now held by `port_id`.
        LOG.debug("Allocated IP %(ip_address)s "
                  "(%(network_id)s/%(subnet_id)s/%(port_id)s)",
                  {'ip_address': ip_address,
                   'network_id': network_id,
                   'subnet_id': subnet_id,
                   'port_id': port_id})
        allocated = models_v2.IPAllocation(
            network_id=network_id,
            port_id=port_id,
            ip_address=ip_address,
            subnet_id=subnet_id
        )
        context.session.add(allocated)
    def _make_subnet_dict(self, subnet, fields=None, context=None):
        # Convert a Subnet model into its API response dictionary, optionally
        # filtered down to `fields`.
        res = {'id': subnet['id'],
               'name': subnet['name'],
               'tenant_id': subnet['tenant_id'],
               'network_id': subnet['network_id'],
               'ip_version': subnet['ip_version'],
               'cidr': subnet['cidr'],
               'subnetpool_id': subnet.get('subnetpool_id'),
               'allocation_pools': [{'start': pool['first_ip'],
                                     'end': pool['last_ip']}
                                    for pool in subnet['allocation_pools']],
               'gateway_ip': subnet['gateway_ip'],
               'enable_dhcp': subnet['enable_dhcp'],
               'ipv6_ra_mode': subnet['ipv6_ra_mode'],
               'ipv6_address_mode': subnet['ipv6_address_mode'],
               'dns_nameservers': [dns['address']
                                   for dns in subnet['dns_nameservers']],
               'host_routes': [{'destination': route['destination'],
                                'nexthop': route['nexthop']}
                               for route in subnet['routes']],
               }
        # The shared attribute for a subnet is the same as its parent network
        res['shared'] = self._make_network_dict(subnet.networks,
                                                context=context)['shared']
        # Call auxiliary extend functions, if any
        self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet)
        return self._fields(res, fields)
    def _make_subnetpool_dict(self, subnetpool, fields=None):
        # Convert a SubnetPool model into its API response dictionary; the
        # prefix-length attributes are stringified for the API layer.
        default_prefixlen = str(subnetpool['default_prefixlen'])
        min_prefixlen = str(subnetpool['min_prefixlen'])
        max_prefixlen = str(subnetpool['max_prefixlen'])
        res = {'id': subnetpool['id'],
               'name': subnetpool['name'],
               'tenant_id': subnetpool['tenant_id'],
               'default_prefixlen': default_prefixlen,
               'min_prefixlen': min_prefixlen,
               'max_prefixlen': max_prefixlen,
               'shared': subnetpool['shared'],
               'prefixes': [prefix['cidr']
                            for prefix in subnetpool['prefixes']],
               'ip_version': subnetpool['ip_version'],
               'default_quota': subnetpool['default_quota']}
        return self._fields(res, fields)
    def _make_port_dict(self, port, fields=None,
                        process_extensions=True):
        # Convert a Port model into its API response dictionary;
        # `process_extensions=False` skips extension-provided attributes.
        res = {"id": port["id"],
               'name': port['name'],
               "network_id": port["network_id"],
               'tenant_id': port['tenant_id'],
               "mac_address": port["mac_address"],
               "admin_state_up": port["admin_state_up"],
               "status": port["status"],
               "fixed_ips": [{'subnet_id': ip["subnet_id"],
                              'ip_address': ip["ip_address"]}
                             for ip in port["fixed_ips"]],
               "device_id": port["device_id"],
               "device_owner": port["device_owner"]}
        # Call auxiliary extend functions, if any
        if process_extensions:
            self._apply_dict_extend_functions(
                attributes.PORTS, res, port)
        return self._fields(res, fields)
    def _get_ipam_subnetpool_driver(self, context, subnetpool=None):
        # Select the pluggable IPAM driver when one is configured, otherwise
        # fall back to the built-in subnet allocator.
        # NOTE(review): neither `ipam_base` nor `subnet_alloc` is imported in
        # this module's import block -- calling this as-is raises NameError.
        # Confirm the missing `from neutron.ipam import ...` imports.
        if cfg.CONF.ipam_driver:
            return ipam_base.Pool.get_instance(subnetpool, context)
        else:
            return subnet_alloc.SubnetAllocator(subnetpool, context)
def _get_network(self, context, id):
try:
network = self._get_by_id(context, models_v2.Network, id)
except exc.NoResultFound:
raise n_exc.NetworkNotFound(net_id=id)
return network
def _get_subnet(self, context, id):
try:
subnet = self._get_by_id(context, models_v2.Subnet, id)
except exc.NoResultFound:
raise n_exc.SubnetNotFound(subnet_id=id)
return subnet
    def _get_subnetpool(self, context, id):
        # Fetch the subnet pool model for `id` or raise SubnetPoolNotFound.
        try:
            return self._get_by_id(context, models_v2.SubnetPool, id)
        except exc.NoResultFound:
            raise n_exc.SubnetPoolNotFound(subnetpool_id=id)
    def _get_all_subnetpools(self, context):
        # Return every subnet pool; unfiltered full-table query.
        # NOTE(tidwellr): see note in _get_all_subnets()
        return context.session.query(models_v2.SubnetPool).all()
def _get_port(self, context, id):
try:
port = self._get_by_id(context, models_v2.Port, id)
except exc.NoResultFound:
raise n_exc.PortNotFound(port_id=id)
return port
    def _get_dns_by_subnet(self, context, subnet_id):
        # All DNS name server rows configured on the given subnet.
        dns_qry = context.session.query(models_v2.DNSNameServer)
        return dns_qry.filter_by(subnet_id=subnet_id).all()
    def _get_route_by_subnet(self, context, subnet_id):
        # All host route rows configured on the given subnet.
        route_qry = context.session.query(models_v2.SubnetRoute)
        return route_qry.filter_by(subnet_id=subnet_id).all()
def _get_router_gw_ports_by_network(self, context, network_id):
port_qry = context.session.query(models_v2.Port)
return port_qry.filter_by(network_id=network_id,
device_owner=constants.DEVICE_OWNER_ROUTER_GW).all()
def _get_subnets_by_network(self, context, network_id):
subnet_qry = context.session.query(models_v2.Subnet)
return subnet_qry.filter_by(network_id=network_id).all()
def _get_subnets_by_subnetpool(self, context, subnetpool_id):
subnet_qry = context.session.query(models_v2.Subnet)
return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all()
    def _get_all_subnets(self, context):
        """Return every subnet row in the database (unfiltered)."""
        # NOTE(salvatore-orlando): This query might end up putting
        # a lot of stress on the db. Consider adding a cache layer
        return context.session.query(models_v2.Subnet).all()
    def _get_subnets(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None,
                     page_reverse=False):
        """Return subnets as API dicts, with filtering/sorting/pagination.

        :param filters: mapping of attribute -> values used to filter rows.
        :param fields: optional list of field names kept in each result dict.
        :param sorts: list of (key, direction) pairs for ordering.
        :param limit: maximum number of results per page.
        :param marker: id of the last item on the previous page.
        :param page_reverse: when True, paginate backwards from the marker.
        """
        marker_obj = self._get_marker_obj(context, 'subnet', limit, marker)
        # Bind context so _get_collection can serialize each row without it.
        make_subnet_dict = functools.partial(self._make_subnet_dict,
                                             context=context)
        return self._get_collection(context, models_v2.Subnet,
                                    make_subnet_dict,
                                    filters=filters, fields=fields,
                                    sorts=sorts,
                                    limit=limit,
                                    marker_obj=marker_obj,
                                    page_reverse=page_reverse)
    def _make_network_dict(self, network, fields=None,
                           process_extensions=True, context=None):
        """Build an API-facing dict view of a network DB object.

        :param network: network model to serialize.
        :param fields: optional iterable of field names to filter the result.
        :param process_extensions: when True, run registered dict-extend
            functions so extensions can add attributes.
        :param context: request context; used to evaluate per-tenant RBAC
            sharing.  May be None, in which case only wildcard ('*') RBAC
            entries count as shared.
        """
        res = {'id': network['id'],
               'name': network['name'],
               'tenant_id': network['tenant_id'],
               'admin_state_up': network['admin_state_up'],
               'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU),
               'status': network['status'],
               'subnets': [subnet['id']
                           for subnet in network['subnets']]}
        # The shared attribute for a network now reflects if the network
        # is shared to the calling tenant via an RBAC entry.
        shared = False
        # Wildcard entries match any tenant; the caller's tenant id is only
        # considered when a context was supplied.
        matches = ('*',) + ((context.tenant_id,) if context else ())
        for entry in network.rbac_entries:
            if (entry.action == 'access_as_shared' and
                    entry.target_tenant in matches):
                shared = True
                break
        res['shared'] = shared
        # TODO(pritesh): Move vlan_transparent to the extension module.
        # vlan_transparent here is only added if the vlantransparent
        # extension is enabled.
        if ('vlan_transparent' in network and network['vlan_transparent'] !=
                attributes.ATTR_NOT_SPECIFIED):
            res['vlan_transparent'] = network['vlan_transparent']
        # Call auxiliary extend functions, if any
        if process_extensions:
            self._apply_dict_extend_functions(
                attributes.NETWORKS, res, network)
        return self._fields(res, fields)
    def _make_subnet_args(self, detail, subnet, subnetpool_id):
        """Build the kwargs used to create a Subnet DB row.

        :param detail: allocation detail object carrying the computed
            ``subnet_cidr``, ``gateway_ip`` and ``tenant_id``.
        :param subnet: request body dict with the user-supplied fields.
        :param subnetpool_id: id of the pool the subnet was allocated from,
            or None for an explicitly-CIDR'd subnet.
        """
        gateway_ip = str(detail.gateway_ip) if detail.gateway_ip else None
        args = {'tenant_id': detail.tenant_id,
                'id': subnet['id'],
                'name': subnet['name'],
                'network_id': subnet['network_id'],
                'ip_version': subnet['ip_version'],
                'cidr': str(detail.subnet_cidr),
                'subnetpool_id': subnetpool_id,
                'enable_dhcp': subnet['enable_dhcp'],
                'gateway_ip': gateway_ip}
        # RA/address modes only make sense for DHCP-enabled IPv6 subnets.
        if subnet['ip_version'] == 6 and subnet['enable_dhcp']:
            if attributes.is_attr_set(subnet['ipv6_ra_mode']):
                args['ipv6_ra_mode'] = subnet['ipv6_ra_mode']
            if attributes.is_attr_set(subnet['ipv6_address_mode']):
                args['ipv6_address_mode'] = subnet['ipv6_address_mode']
        return args
def _make_fixed_ip_dict(self, ips):
# Excludes from dict all keys except subnet_id and ip_address
return [{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in ips]
| paninetworks/neutron | neutron/db/db_base_plugin_common.py | Python | apache-2.0 | 12,074 |
#!/usr/bin/python -tt
import commands
import re
import sys
def aray__to_string(ar):
    """Join every element of *ar* except the first into a space-separated string.

    Used to turn sys.argv into the list of search directories (dropping the
    script name at ar[0]).

    Bug fixed: the old implementation concatenated everything and then did
    str.replace(ar[0], ""), which removed EVERY occurrence of ar[0] as a
    substring anywhere in the string — corrupting any later argument that
    happened to contain it.  Joining ar[1:] gives the intended result directly.
    """
    return " ".join(ar[1:])
def get_class_path():
    """Read the pre-built Java classpath from the ``classpath`` file.

    The file is expected to hold one line, produced e.g. by:
        find lib/ -type f \\( -name "*.jar" -o -name "*.zip" \\) | paste -sd: > classpath
    Returns the classpath with the current directory (":.") appended.

    Bug fixed: the file handle was previously never closed; the ``with``
    statement guarantees it is released even on error.
    """
    with open("classpath") as f:
        lines = f.readlines()
    return str(lines[0].split("\n")[0]) + ":."
def __file__log__(s,filename):
f=open(filename,"a")
f.write(str(s)+"\n")
f.flush()
f.close()
def main():
    """Find every .java file under the directories given on the command line
    and compile those living inside a WEB-INF/classes tree.

    Compilation status is appended to comp.py.log and raw javac output to
    debug.compile.log.
    """
    classpath = get_class_path()
    search_dirs = aray__to_string(sys.argv)
    status, output = commands.getstatusoutput(
        "find " + search_dirs + " -type f -name \"*.java\"")
    # Only sources inside a WEB-INF/classes tree are compiled.
    compile_targets = [path for path in output.split("\n")
                       if "WEB-INF" in path and "classes" in path]
    for source in compile_targets:
        # Everything up to (and including) ".../classes/" is the compile root.
        class_root = re.findall(r".+classes.", source)[0]
        relative_name = source.split(class_root)[1]
        javac_cmd = ("cd " + class_root + "; javac -cp " + classpath +
                     " " + relative_name)
        status, compiler_output = commands.getstatusoutput(javac_cmd)
        __file__log__(str(status) + " " + class_root + " " + relative_name +
                      " - " + source, "comp.py.log")
        __file__log__(compiler_output, "debug.compile.log")
# Entry point guard: run the bulk compiler only when executed as a script.
if __name__ == "__main__":
    main()
| dushmis/misc | python/bulk_compile_all_java.py | Python | apache-2.0 | 1,386 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def query_external_sheets_permanent_table(dataset_id):
    """Create a permanent BigQuery table backed by a Google Sheet and query it.

    :param dataset_id: fully-qualified dataset id ("project.dataset") in which
        the external table will be created.

    Requires the BigQuery AND Drive APIs to be enabled, and application
    default credentials that carry both scopes (see the gcloud command below).
    This is a documentation sample; the [START]/[END] region tags are consumed
    by the docs tooling and must be preserved.
    """
    # [START bigquery_query_external_sheets_perm]
    from google.cloud import bigquery
    import google.auth

    # Create credentials with Drive & BigQuery API scopes.
    # Both APIs must be enabled for your project before running this code.
    #
    # If you are using credentials from gcloud, you must authorize the
    # application first with the following command:
    #
    # gcloud auth application-default login \
    #   --scopes=https://www.googleapis.com/auth/drive,https://www.googleapis.com/auth/cloud-platform
    credentials, project = google.auth.default(
        scopes=[
            "https://www.googleapis.com/auth/drive",
            "https://www.googleapis.com/auth/bigquery",
        ]
    )

    # Construct a BigQuery client object.
    client = bigquery.Client(credentials=credentials, project=project)

    # TODO(developer): Set dataset_id to the ID of the dataset to fetch.
    # dataset_id = "your-project.your_dataset"

    # Configure the external data source.
    dataset = client.get_dataset(dataset_id)
    table_id = "us_states"
    schema = [
        bigquery.SchemaField("name", "STRING"),
        bigquery.SchemaField("post_abbr", "STRING"),
    ]
    table = bigquery.Table(dataset.table(table_id), schema=schema)
    external_config = bigquery.ExternalConfig("GOOGLE_SHEETS")
    # Use a shareable link or grant viewing access to the email address you
    # used to authenticate with BigQuery (this example Sheet is public).
    sheet_url = (
        "https://docs.google.com/spreadsheets"
        "/d/1i_QCL-7HcSyUZmIbP9E6lO_T5u3HnpLe7dnpHaijg_E/edit?usp=sharing"
    )
    external_config.source_uris = [sheet_url]
    external_config.options.skip_leading_rows = 1  # Optionally skip header row.
    external_config.options.range = (
        "us-states!A20:B49"  # Optionally set range of the sheet to query from.
    )
    table.external_data_configuration = external_config

    # Create a permanent table linked to the Sheets file.
    table = client.create_table(table)  # Make an API request.

    # Example query to find states starting with "W".
    sql = 'SELECT * FROM `{}.{}` WHERE name LIKE "W%"'.format(dataset_id, table_id)

    query_job = client.query(sql)  # Make an API request.

    # Wait for the query to complete.
    w_states = list(query_job)
    print(
        "There are {} states with names starting with W in the selected range.".format(
            len(w_states)
        )
    )
    # [END bigquery_query_external_sheets_perm]
| googleapis/python-bigquery | samples/query_external_sheets_permanent_table.py | Python | apache-2.0 | 3,135 |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import setuptools

# All package metadata lives in setup.cfg; pbr reads it and fills in setup().
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)
| Mirantis/pumphouse | setup.py | Python | apache-2.0 | 656 |
"""Common code for Withings."""
import asyncio
from dataclasses import dataclass
import datetime
from datetime import timedelta
from enum import Enum, IntEnum
import logging
import re
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from aiohttp.web import Response
import requests
from withings_api import AbstractWithingsApi
from withings_api.common import (
AuthFailedException,
GetSleepSummaryField,
MeasureGroupAttribs,
MeasureType,
MeasureTypes,
NotifyAppli,
SleepGetSummaryResponse,
UnauthorizedException,
query_measure_groups,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_WEBHOOK_ID,
HTTP_UNAUTHORIZED,
MASS_KILOGRAMS,
PERCENTAGE,
SPEED_METERS_PER_SECOND,
TIME_SECONDS,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.config_entry_oauth2_flow import (
AUTH_CALLBACK_PATH,
AbstractOAuth2Implementation,
LocalOAuth2Implementation,
OAuth2Session,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.network import get_url
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util import dt
from . import const
from .const import Measurement
_LOGGER = logging.getLogger(const.LOG_NAMESPACE)

# Matches error strings that begin with "401," — the client library reports
# not-authenticated failures as plain strings prefixed with the HTTP status.
NOT_AUTHENTICATED_ERROR = re.compile(
    f"^{HTTP_UNAUTHORIZED},.*",
    re.IGNORECASE,
)
DATA_UPDATED_SIGNAL = "withings_entity_state_updated"

# Alias: latest value per Measurement, as held by the update coordinators.
MeasurementData = Dict[Measurement, Any]
# Raised when the Withings API rejects our credentials/token.
class NotAuthenticatedError(HomeAssistantError):
    """Raise when not authenticated with the service."""
# Raised for non-auth failures reported by the Withings service.
class ServiceError(HomeAssistantError):
    """Raise when the service has an error."""
class UpdateType(Enum):
    """Data update type."""

    POLL = "poll"  # value fetched periodically from the API
    WEBHOOK = "webhook"  # value pushed to us by a Withings webhook
@dataclass
class WithingsAttribute:
    """Immutable class for describing withings sensor data."""

    # Home-assistant-side measurement this attribute maps to.
    measurement: Measurement
    # Withings-side type (MeasureType, GetSleepSummaryField or NotifyAppli).
    # NOTE: the name is a historical typo ("measute" for "measure"); it is
    # referenced throughout this module (e.g. WITHINGS_MEASURE_TYPE_MAP), so
    # renaming it would be a breaking change.
    measute_type: Enum
    friendly_name: str
    unit_of_measurement: str
    icon: Optional[str]
    # Entity platform this attribute belongs to (sensor / binary_sensor).
    platform: str
    enabled_by_default: bool
    # Whether the value arrives by polling or by webhook push.
    update_type: UpdateType
@dataclass
class WithingsData:
    """Represents value and meta-data from the withings service."""

    attribute: WithingsAttribute
    value: Any  # raw value; units per attribute.unit_of_measurement
@dataclass
class WebhookConfig:
    """Config for a webhook."""

    id: str  # Home Assistant webhook id
    url: str  # externally reachable callback URL registered with Withings
    enabled: bool  # whether webhook-based updates are active
@dataclass
class StateData:
    """State data held by data manager for retrieval by entities."""

    unique_id: str
    state: Any
WITHINGS_ATTRIBUTES = [
WithingsAttribute(
Measurement.WEIGHT_KG,
MeasureType.WEIGHT,
"Weight",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_MASS_KG,
MeasureType.FAT_MASS_WEIGHT,
"Fat Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_FREE_MASS_KG,
MeasureType.FAT_FREE_MASS,
"Fat Free Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.MUSCLE_MASS_KG,
MeasureType.MUSCLE_MASS,
"Muscle Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.BONE_MASS_KG,
MeasureType.BONE_MASS,
"Bone Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HEIGHT_M,
MeasureType.HEIGHT,
"Height",
const.UOM_LENGTH_M,
"mdi:ruler",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.TEMP_C,
MeasureType.TEMPERATURE,
"Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.BODY_TEMP_C,
MeasureType.BODY_TEMPERATURE,
"Body Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SKIN_TEMP_C,
MeasureType.SKIN_TEMPERATURE,
"Skin Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_RATIO_PCT,
MeasureType.FAT_RATIO,
"Fat Ratio",
PERCENTAGE,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.DIASTOLIC_MMHG,
MeasureType.DIASTOLIC_BLOOD_PRESSURE,
"Diastolic Blood Pressure",
const.UOM_MMHG,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SYSTOLIC_MMGH,
MeasureType.SYSTOLIC_BLOOD_PRESSURE,
"Systolic Blood Pressure",
const.UOM_MMHG,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HEART_PULSE_BPM,
MeasureType.HEART_RATE,
"Heart Pulse",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SPO2_PCT,
MeasureType.SP02,
"SP02",
PERCENTAGE,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HYDRATION,
MeasureType.HYDRATION,
"Hydration",
MASS_KILOGRAMS,
"mdi:water",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.PWV,
MeasureType.PULSE_WAVE_VELOCITY,
"Pulse Wave Velocity",
SPEED_METERS_PER_SECOND,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_BREATHING_DISTURBANCES_INTENSITY,
GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY,
"Breathing disturbances intensity",
"",
"",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_DEEP_DURATION_SECONDS,
GetSleepSummaryField.DEEP_SLEEP_DURATION,
"Deep sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_TOSLEEP_DURATION_SECONDS,
GetSleepSummaryField.DURATION_TO_SLEEP,
"Time to sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_TOWAKEUP_DURATION_SECONDS,
GetSleepSummaryField.DURATION_TO_WAKEUP,
"Time to wakeup",
TIME_SECONDS,
"mdi:sleep-off",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_HEART_RATE_AVERAGE,
GetSleepSummaryField.HR_AVERAGE,
"Average heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_HEART_RATE_MAX,
GetSleepSummaryField.HR_MAX,
"Maximum heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_HEART_RATE_MIN,
GetSleepSummaryField.HR_MIN,
"Minimum heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_LIGHT_DURATION_SECONDS,
GetSleepSummaryField.LIGHT_SLEEP_DURATION,
"Light sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_REM_DURATION_SECONDS,
GetSleepSummaryField.REM_SLEEP_DURATION,
"REM sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_RESPIRATORY_RATE_AVERAGE,
GetSleepSummaryField.RR_AVERAGE,
"Average respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_RESPIRATORY_RATE_MAX,
GetSleepSummaryField.RR_MAX,
"Maximum respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_RESPIRATORY_RATE_MIN,
GetSleepSummaryField.RR_MIN,
"Minimum respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_SCORE,
GetSleepSummaryField.SLEEP_SCORE,
"Sleep score",
const.SCORE_POINTS,
"mdi:medal",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_SNORING,
GetSleepSummaryField.SNORING,
"Snoring",
"",
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_SNORING_EPISODE_COUNT,
GetSleepSummaryField.SNORING_EPISODE_COUNT,
"Snoring episode count",
"",
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_WAKEUP_COUNT,
GetSleepSummaryField.WAKEUP_COUNT,
"Wakeup count",
const.UOM_FREQUENCY,
"mdi:sleep-off",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_WAKEUP_DURATION_SECONDS,
GetSleepSummaryField.WAKEUP_DURATION,
"Wakeup time",
TIME_SECONDS,
"mdi:sleep-off",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
# Webhook measurements.
WithingsAttribute(
Measurement.IN_BED,
NotifyAppli.BED_IN,
"In bed",
"",
"mdi:bed",
BINARY_SENSOR_DOMAIN,
True,
UpdateType.WEBHOOK,
),
]
# Fast lookup tables derived from WITHINGS_ATTRIBUTES:
# ...by Home-Assistant-side Measurement member,
WITHINGS_MEASUREMENTS_MAP: Dict[Measurement, WithingsAttribute] = {
    attr.measurement: attr for attr in WITHINGS_ATTRIBUTES
}
# ...and by the Withings-side measure type / notification category.
WITHINGS_MEASURE_TYPE_MAP: Dict[
    Union[NotifyAppli, GetSleepSummaryField, MeasureType], WithingsAttribute
] = {attr.measute_type: attr for attr in WITHINGS_ATTRIBUTES}
class ConfigEntryWithingsApi(AbstractWithingsApi):
    """Withing API that uses HA resources."""

    def __init__(
        self,
        hass: HomeAssistant,
        config_entry: ConfigEntry,
        implementation: AbstractOAuth2Implementation,
    ):
        """Initialize object."""
        self._hass = hass
        self._config_entry = config_entry
        self._implementation = implementation
        self.session = OAuth2Session(hass, config_entry, implementation)

    def _request(
        self, path: str, params: Dict[str, Any], method: str = "GET"
    ) -> Dict[str, Any]:
        """Perform a request against the Withings API and return its JSON body.

        This runs in an executor thread, so the token refresh coroutine must
        be scheduled onto the event loop and waited on from here.
        """
        # Bug fixed: the future returned by run_coroutine_threadsafe was
        # previously discarded, so the request could be sent before the token
        # refresh finished (i.e. with an expired access token). Block on
        # .result() so the token below is guaranteed to be valid.
        asyncio.run_coroutine_threadsafe(
            self.session.async_ensure_token_valid(), self._hass.loop
        ).result()
        access_token = self._config_entry.data["token"]["access_token"]
        response = requests.request(
            method,
            f"{self.URL}/{path}",
            params=params,
            headers={"Authorization": f"Bearer {access_token}"},
        )
        return response.json()
def json_message_response(message: str, message_code: int) -> Response:
    """Build the standard JSON payload used for webhook replies."""
    payload = {"message": message, "code": message_code}
    return HomeAssistantView.json(payload, 200)
class WebhookAvailability(IntEnum):
    """Represents various statuses of webhook availability."""

    SUCCESS = 0  # webhook endpoint reachable and responding
    CONNECT_ERROR = 1  # could not connect to the endpoint
    HTTP_ERROR = 2  # endpoint responded with an HTTP error status
    NOT_WEBHOOK = 3  # endpoint responded but is not our webhook handler
class WebhookUpdateCoordinator:
    """Coordinates webhook data updates across listeners."""

    def __init__(self, hass: HomeAssistant, user_id: int) -> None:
        """Initialize the object."""
        self._hass = hass
        self._user_id = user_id
        self._listeners: List[CALLBACK_TYPE] = []
        self.data: MeasurementData = {}  # latest value per Measurement

    def async_add_listener(self, listener: CALLBACK_TYPE) -> Callable[[], None]:
        """Add a listener.

        :returns: a callable that removes the listener again (the usual
            Home Assistant unsubscribe pattern).
        """
        self._listeners.append(listener)

        @callback
        def remove_listener() -> None:
            self.async_remove_listener(listener)

        return remove_listener

    def async_remove_listener(self, listener: CALLBACK_TYPE) -> None:
        """Remove a listener."""
        self._listeners.remove(listener)

    def update_data(self, measurement: Measurement, value: Any) -> None:
        """Update the data object and notify listeners the data has changed."""
        self.data[measurement] = value
        self.notify_data_changed()

    def notify_data_changed(self) -> None:
        """Notify all listeners the data has changed."""
        for listener in self._listeners:
            listener()
class DataManager:
    """Manage withing data.

    Owns the polling coordinators, webhook (de)registration with the Withings
    service, and retry/reauth handling for API calls.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        profile: str,
        api: ConfigEntryWithingsApi,
        user_id: int,
        webhook_config: WebhookConfig,
    ):
        """Initialize the data manager."""
        self._hass = hass
        self._api = api
        self._user_id = user_id
        self._profile = profile
        self._webhook_config = webhook_config
        # Withings HEAD-checks the callback between notify calls; spacing the
        # calls out reduces the chance of spurious failures.
        self._notify_subscribe_delay = datetime.timedelta(seconds=5)
        self._notify_unsubscribe_delay = datetime.timedelta(seconds=1)
        self._is_available = True
        self._cancel_interval_update_interval: Optional[CALLBACK_TYPE] = None
        self._cancel_configure_webhook_subscribe_interval: Optional[
            CALLBACK_TYPE
        ] = None
        self._api_notification_id = f"withings_{self._user_id}"

        self.subscription_update_coordinator = DataUpdateCoordinator(
            hass,
            _LOGGER,
            name="subscription_update_coordinator",
            update_interval=timedelta(minutes=120),
            update_method=self.async_subscribe_webhook,
        )
        # Poll far less often when the webhook pushes data to us.
        self.poll_data_update_coordinator = DataUpdateCoordinator[
            Dict[MeasureType, Any]
        ](
            hass,
            _LOGGER,
            name="poll_data_update_coordinator",
            update_interval=timedelta(minutes=120)
            if self._webhook_config.enabled
            else timedelta(minutes=10),
            update_method=self.async_get_all_data,
        )
        self.webhook_update_coordinator = WebhookUpdateCoordinator(
            self._hass, self._user_id
        )
        self._cancel_subscription_update: Optional[Callable[[], None]] = None
        self._subscribe_webhook_run_count = 0

    @property
    def webhook_config(self) -> WebhookConfig:
        """Get the webhook config."""
        return self._webhook_config

    @property
    def user_id(self) -> int:
        """Get the user_id of the authenticated user."""
        return self._user_id

    @property
    def profile(self) -> str:
        """Get the profile."""
        return self._profile

    def async_start_polling_webhook_subscriptions(self) -> None:
        """Start polling webhook subscriptions (if enabled) to reconcile their setup."""
        self.async_stop_polling_webhook_subscriptions()

        def empty_listener() -> None:
            pass

        # Registering a (no-op) listener is what activates the coordinator's
        # periodic refresh.
        self._cancel_subscription_update = (
            self.subscription_update_coordinator.async_add_listener(empty_listener)
        )

    def async_stop_polling_webhook_subscriptions(self) -> None:
        """Stop polling webhook subscriptions."""
        if self._cancel_subscription_update:
            self._cancel_subscription_update()
            self._cancel_subscription_update = None

    async def _do_retry(self, func, attempts=3) -> Any:
        """Retry a function call.

        Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work.
        """
        exception = None
        for attempt in range(1, attempts + 1):
            _LOGGER.debug("Attempt %s of %s", attempt, attempts)
            try:
                return await func()
            except Exception as exception1:  # pylint: disable=broad-except
                await asyncio.sleep(0.1)
                exception = exception1
                continue

        # All attempts failed; surface the last exception seen.
        if exception:
            raise exception

    async def async_subscribe_webhook(self) -> None:
        """Subscribe the webhook to withings data updates."""
        return await self._do_retry(self._async_subscribe_webhook)

    async def _async_subscribe_webhook(self) -> None:
        """Reconcile our webhook subscriptions with the Withings service."""
        _LOGGER.debug("Configuring withings webhook")

        # On first startup, perform a fresh re-subscribe. Withings stops pushing data
        # if the webhook fails enough times but they don't remove the old subscription
        # config. This ensures the subscription is setup correctly and they start
        # pushing again.
        if self._subscribe_webhook_run_count == 0:
            _LOGGER.debug("Refreshing withings webhook configs")
            await self.async_unsubscribe_webhook()
        self._subscribe_webhook_run_count += 1

        # Get the current webhooks.
        response = await self._hass.async_add_executor_job(self._api.notify_list)

        subscribed_applis = frozenset(
            [
                profile.appli
                for profile in response.profiles
                if profile.callbackurl == self._webhook_config.url
            ]
        )

        # Determine what subscriptions need to be created.
        ignored_applis = frozenset({NotifyAppli.USER})
        to_add_applis = frozenset(
            [
                appli
                for appli in NotifyAppli
                if appli not in subscribed_applis and appli not in ignored_applis
            ]
        )

        # Subscribe to each one.
        for appli in to_add_applis:
            _LOGGER.debug(
                "Subscribing %s for %s in %s seconds",
                self._webhook_config.url,
                appli,
                self._notify_subscribe_delay.total_seconds(),
            )
            # Withings will HTTP HEAD the callback_url and needs some downtime
            # between each call or there is a higher chance of failure.
            await asyncio.sleep(self._notify_subscribe_delay.total_seconds())
            await self._hass.async_add_executor_job(
                self._api.notify_subscribe, self._webhook_config.url, appli
            )

    async def async_unsubscribe_webhook(self) -> None:
        """Unsubscribe webhook from withings data updates."""
        return await self._do_retry(self._async_unsubscribe_webhook)

    async def _async_unsubscribe_webhook(self) -> None:
        """Revoke every notify subscription registered with Withings."""
        # Get the current webhooks.
        response = await self._hass.async_add_executor_job(self._api.notify_list)

        # Revoke subscriptions.
        for profile in response.profiles:
            _LOGGER.debug(
                "Unsubscribing %s for %s in %s seconds",
                profile.callbackurl,
                profile.appli,
                self._notify_unsubscribe_delay.total_seconds(),
            )
            # Quick calls to Withings can result in the service returning errors. Give them
            # some time to cool down.
            # Bug fixed: this previously slept for the *subscribe* delay while
            # logging the unsubscribe delay above; use the unsubscribe delay.
            await asyncio.sleep(self._notify_unsubscribe_delay.total_seconds())
            await self._hass.async_add_executor_job(
                self._api.notify_revoke, profile.callbackurl, profile.appli
            )

    async def async_get_all_data(self) -> Optional[Dict[MeasureType, Any]]:
        """Update all withings data."""
        try:
            return await self._do_retry(self._async_get_all_data)
        except Exception as exception:
            # User is not authenticated.
            if isinstance(
                exception, (UnauthorizedException, AuthFailedException)
            ) or NOT_AUTHENTICATED_ERROR.match(str(exception)):
                context = {
                    const.PROFILE: self._profile,
                    "userid": self._user_id,
                    "source": "reauth",
                }

                # Check if reauth flow already exists.
                flow = next(
                    iter(
                        flow
                        for flow in self._hass.config_entries.flow.async_progress()
                        if flow.context == context
                    ),
                    None,
                )
                if flow:
                    return

                # Start a reauth flow.
                await self._hass.config_entries.flow.async_init(
                    const.DOMAIN,
                    context=context,
                )
                return

            raise exception

    async def _async_get_all_data(self) -> Optional[Dict[MeasureType, Any]]:
        """Fetch measures and sleep summaries and merge them into one dict."""
        _LOGGER.info("Updating all withings data")
        return {
            **await self.async_get_measures(),
            **await self.async_get_sleep_summary(),
        }

    async def async_get_measures(self) -> Dict[MeasureType, Any]:
        """Get the measures data."""
        _LOGGER.debug("Updating withings measures")

        response = await self._hass.async_add_executor_job(self._api.measure_get_meas)

        # Sort from oldest to newest so later (newer) groups win in the dict.
        groups = sorted(
            query_measure_groups(
                response, MeasureTypes.ANY, MeasureGroupAttribs.UNAMBIGUOUS
            ),
            key=lambda group: group.created.datetime,
            reverse=False,
        )

        return {
            WITHINGS_MEASURE_TYPE_MAP[measure.type].measurement: round(
                float(measure.value * pow(10, measure.unit)), 2
            )
            for group in groups
            for measure in group.measures
        }

    async def async_get_sleep_summary(self) -> Dict[MeasureType, Any]:
        """Get the sleep summary data."""
        _LOGGER.debug("Updating withing sleep summary")
        now = dt.utcnow()
        yesterday = now - datetime.timedelta(days=1)
        yesterday_noon = datetime.datetime(
            yesterday.year,
            yesterday.month,
            yesterday.day,
            12,
            0,
            0,
            0,
            datetime.timezone.utc,
        )

        def get_sleep_summary() -> SleepGetSummaryResponse:
            return self._api.sleep_get_summary(
                lastupdate=yesterday_noon,
                data_fields=[
                    GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY,
                    GetSleepSummaryField.DEEP_SLEEP_DURATION,
                    GetSleepSummaryField.DURATION_TO_SLEEP,
                    GetSleepSummaryField.DURATION_TO_WAKEUP,
                    GetSleepSummaryField.HR_AVERAGE,
                    GetSleepSummaryField.HR_MAX,
                    GetSleepSummaryField.HR_MIN,
                    GetSleepSummaryField.LIGHT_SLEEP_DURATION,
                    GetSleepSummaryField.REM_SLEEP_DURATION,
                    GetSleepSummaryField.RR_AVERAGE,
                    GetSleepSummaryField.RR_MAX,
                    GetSleepSummaryField.RR_MIN,
                    GetSleepSummaryField.SLEEP_SCORE,
                    GetSleepSummaryField.SNORING,
                    GetSleepSummaryField.SNORING_EPISODE_COUNT,
                    GetSleepSummaryField.WAKEUP_COUNT,
                    GetSleepSummaryField.WAKEUP_DURATION,
                ],
            )

        response = await self._hass.async_add_executor_job(get_sleep_summary)

        # Set the default to empty lists.
        raw_values: Dict[GetSleepSummaryField, List[int]] = {
            field: [] for field in GetSleepSummaryField
        }

        # Collect the raw data.
        for serie in response.series:
            data = serie.data
            for field in GetSleepSummaryField:
                raw_values[field].append(data._asdict()[field.value])

        values: Dict[GetSleepSummaryField, float] = {}

        def average(data: List[int]) -> float:
            return sum(data) / len(data)

        def set_value(field: GetSleepSummaryField, func: Callable) -> None:
            # Drop None samples; only aggregate real readings, else store None.
            non_nones = [
                value for value in raw_values.get(field, []) if value is not None
            ]
            values[field] = func(non_nones) if non_nones else None

        # Durations/counts are summed across series; rates are averaged.
        set_value(GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY, average)
        set_value(GetSleepSummaryField.DEEP_SLEEP_DURATION, sum)
        set_value(GetSleepSummaryField.DURATION_TO_SLEEP, average)
        set_value(GetSleepSummaryField.DURATION_TO_WAKEUP, average)
        set_value(GetSleepSummaryField.HR_AVERAGE, average)
        set_value(GetSleepSummaryField.HR_MAX, average)
        set_value(GetSleepSummaryField.HR_MIN, average)
        set_value(GetSleepSummaryField.LIGHT_SLEEP_DURATION, sum)
        set_value(GetSleepSummaryField.REM_SLEEP_DURATION, sum)
        set_value(GetSleepSummaryField.RR_AVERAGE, average)
        set_value(GetSleepSummaryField.RR_MAX, average)
        set_value(GetSleepSummaryField.RR_MIN, average)
        set_value(GetSleepSummaryField.SLEEP_SCORE, max)
        set_value(GetSleepSummaryField.SNORING, average)
        set_value(GetSleepSummaryField.SNORING_EPISODE_COUNT, sum)
        set_value(GetSleepSummaryField.WAKEUP_COUNT, sum)
        set_value(GetSleepSummaryField.WAKEUP_DURATION, average)

        return {
            WITHINGS_MEASURE_TYPE_MAP[field].measurement: round(value, 4)
            if value is not None
            else None
            for field, value in values.items()
        }

    async def async_webhook_data_updated(self, data_category: NotifyAppli) -> None:
        """Handle scenario when data is updated from a webook."""
        _LOGGER.debug("Withings webhook triggered")
        if data_category in {
            NotifyAppli.WEIGHT,
            NotifyAppli.CIRCULATORY,
            NotifyAppli.SLEEP,
        }:
            await self.poll_data_update_coordinator.async_request_refresh()

        elif data_category in {NotifyAppli.BED_IN, NotifyAppli.BED_OUT}:
            self.webhook_update_coordinator.update_data(
                Measurement.IN_BED, data_category == NotifyAppli.BED_IN
            )
def get_attribute_unique_id(attribute: WithingsAttribute, user_id: int) -> str:
    """Build the Home Assistant unique id for a user's withings attribute."""
    measurement_key = attribute.measurement.value
    return f"withings_{user_id}_{measurement_key}"
async def async_get_entity_id(
    hass: HomeAssistant, attribute: WithingsAttribute, user_id: int
) -> Optional[str]:
    """Look up the registry entity id for a user's attribute, or None."""
    registry: EntityRegistry = (
        await hass.helpers.entity_registry.async_get_registry()
    )

    unique_id = get_attribute_unique_id(attribute, user_id)
    entity_id = registry.async_get_entity_id(
        attribute.platform, const.DOMAIN, unique_id
    )
    if entity_id is not None:
        return entity_id

    _LOGGER.error("Cannot find entity id for unique_id: %s", unique_id)
    return None
class BaseWithingsSensor(Entity):
    """Base class for withings sensors."""

    def __init__(self, data_manager: DataManager, attribute: WithingsAttribute) -> None:
        """Initialize the Withings sensor."""
        self._data_manager = data_manager
        self._attribute = attribute
        self._profile = self._data_manager.profile
        self._user_id = self._data_manager.user_id
        self._name = f"Withings {self._attribute.measurement.value} {self._profile}"
        self._unique_id = get_attribute_unique_id(self._attribute, self._user_id)
        self._state_data: Optional[Any] = None

    @property
    def should_poll(self) -> bool:
        """Return False to indicate HA should not poll for changes."""
        return False

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._name

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        # Poll-driven entities track the poll coordinator's last success;
        # webhook-driven ones need the webhook enabled AND a value received.
        if self._attribute.update_type == UpdateType.POLL:
            return self._data_manager.poll_data_update_coordinator.last_update_success

        if self._attribute.update_type == UpdateType.WEBHOOK:
            return self._data_manager.webhook_config.enabled and (
                self._attribute.measurement
                in self._data_manager.webhook_update_coordinator.data
            )

        return True

    @property
    def unique_id(self) -> str:
        """Return a unique, Home Assistant friendly identifier for this entity."""
        return self._unique_id

    @property
    def unit_of_measurement(self) -> str:
        """Return the unit of measurement of this entity, if any."""
        return self._attribute.unit_of_measurement

    @property
    def icon(self) -> str:
        """Icon to use in the frontend, if any."""
        return self._attribute.icon

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._attribute.enabled_by_default

    @callback
    def _on_poll_data_updated(self) -> None:
        # Refresh state from the poll coordinator's latest snapshot.
        self._update_state_data(
            self._data_manager.poll_data_update_coordinator.data or {}
        )

    @callback
    def _on_webhook_data_updated(self) -> None:
        # Refresh state from the webhook coordinator's latest snapshot.
        self._update_state_data(
            self._data_manager.webhook_update_coordinator.data or {}
        )

    def _update_state_data(self, data: MeasurementData) -> None:
        """Update the state data."""
        self._state_data = data.get(self._attribute.measurement)
        self.async_write_ha_state()

    async def async_added_to_hass(self) -> None:
        """Register update dispatcher."""
        # Subscribe to the matching coordinator and pull the current value
        # immediately so the entity doesn't start out stale.
        if self._attribute.update_type == UpdateType.POLL:
            self.async_on_remove(
                self._data_manager.poll_data_update_coordinator.async_add_listener(
                    self._on_poll_data_updated
                )
            )
            self._on_poll_data_updated()

        elif self._attribute.update_type == UpdateType.WEBHOOK:
            self.async_on_remove(
                self._data_manager.webhook_update_coordinator.async_add_listener(
                    self._on_webhook_data_updated
                )
            )
            self._on_webhook_data_updated()
async def async_get_data_manager(
    hass: HomeAssistant, config_entry: ConfigEntry
) -> DataManager:
    """Get the data manager for a config entry."""
    # Lazily create and cache exactly one DataManager per config entry under
    # hass.data[DOMAIN][entry_id]; subsequent calls return the cached one.
    hass.data.setdefault(const.DOMAIN, {})
    hass.data[const.DOMAIN].setdefault(config_entry.entry_id, {})
    config_entry_data = hass.data[const.DOMAIN][config_entry.entry_id]
    if const.DATA_MANAGER not in config_entry_data:
        profile = config_entry.data.get(const.PROFILE)
        _LOGGER.debug("Creating withings data manager for profile: %s", profile)
        config_entry_data[const.DATA_MANAGER] = DataManager(
            hass,
            profile,
            ConfigEntryWithingsApi(
                hass=hass,
                config_entry=config_entry,
                implementation=await config_entry_oauth2_flow.async_get_config_entry_implementation(
                    hass, config_entry
                ),
            ),
            # NOTE(review): assumes the stored OAuth token payload always
            # carries a "userid" field -- confirm against the auth flow.
            config_entry.data["token"]["userid"],
            WebhookConfig(
                id=config_entry.data[CONF_WEBHOOK_ID],
                url=config_entry.data[const.CONF_WEBHOOK_URL],
                enabled=config_entry.data[const.CONF_USE_WEBHOOK],
            ),
        )
    return config_entry_data[const.DATA_MANAGER]
def get_data_manager_by_webhook_id(
    hass: HomeAssistant, webhook_id: str
) -> Optional[DataManager]:
    """Get a data manager by its webhook id.

    Returns None when no configured data manager uses the given webhook id.
    """
    # Use a lazy generator with next() so the scan stops at the first match
    # instead of materializing the full list of matches first.
    return next(
        (
            data_manager
            for data_manager in get_all_data_managers(hass)
            if data_manager.webhook_config.id == webhook_id
        ),
        None,
    )
def get_all_data_managers(hass: HomeAssistant) -> Tuple[DataManager, ...]:
    """Get all configured data managers."""
    # Build the tuple directly from a generator expression; wrapping a list
    # comprehension in tuple() allocates a throwaway list.
    return tuple(
        config_entry_data[const.DATA_MANAGER]
        for config_entry_data in hass.data[const.DOMAIN].values()
        if const.DATA_MANAGER in config_entry_data
    )
def async_remove_data_manager(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
    """Drop the cached data manager for a config entry."""
    # Same semantics as `del`: raises KeyError if no manager was cached.
    hass.data[const.DOMAIN][config_entry.entry_id].pop(const.DATA_MANAGER)
async def async_create_entities(
    hass: HomeAssistant,
    entry: ConfigEntry,
    create_func: Callable[[DataManager, WithingsAttribute], Entity],
    platform: str,
) -> List[Entity]:
    """Create withings entities from config entry.

    Applies create_func to every attribute registered for the platform.
    """
    data_manager = await async_get_data_manager(hass, entry)
    entities: List[Entity] = []
    for attribute in get_platform_attributes(platform):
        entities.append(create_func(data_manager, attribute))
    return entities
def get_platform_attributes(platform: str) -> Tuple[WithingsAttribute, ...]:
    """Get withings attributes used for a specific platform."""
    # Feed tuple() a generator directly; the intermediate list was wasted work.
    return tuple(
        attribute
        for attribute in WITHINGS_ATTRIBUTES
        if attribute.platform == platform
    )
class WithingsLocalOAuth2Implementation(LocalOAuth2Implementation):
    """Oauth2 implementation that only uses the external url."""
    @property
    def redirect_uri(self) -> str:
        """Return the redirect uri."""
        # Withings must reach the callback from the internet, so never hand
        # out an internal URL; prefer the cloud URL when one is configured.
        url = get_url(self.hass, allow_internal=False, prefer_cloud=True)
        return f"{url}{AUTH_CALLBACK_PATH}"
| turbokongen/home-assistant | homeassistant/components/withings/common.py | Python | apache-2.0 | 34,960 |
import os
from unipath import Path
from django.core.exceptions import ImproperlyConfigured
import dj_database_url
def env_var(var_name):
    """Get the environment variable var_name or return an exception."""
    # Look the variable up once; a missing variable is a configuration error.
    value = os.environ.get(var_name)
    if value is None:
        msg = "Please set the environment variable {}".format(var_name)
        raise ImproperlyConfigured(msg)
    return value
SECRET_KEY = env_var("MT_SECRET_KEY")
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# ADMIN_PATH controls where the admin urls are.
# e.g. if ADMIN_PATH == 'adminsitemilktea', then the admin site
# should be available at /adminsitemilktea/ instead of /admin/.
ADMIN_PATH = env_var("MT_ADMIN_PATH")
DJANGO_CORE_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
    'djmoney',
    'nested_admin',
]
CUSTOM_APPS = [
    'core',
]
INSTALLED_APPS = DJANGO_CORE_APPS + THIRD_PARTY_APPS + CUSTOM_APPS
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mt.urls'
WSGI_APPLICATION = 'mt.wsgi.application'
# Project root: three directories above this settings module.
BASE_DIR = Path(__file__).ancestor(3)
MEDIA_ROOT = BASE_DIR.child("media")
STATIC_ROOT = BASE_DIR.child("static")
STATICFILES_DIRS = (
    BASE_DIR.child("assets"),
)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': (BASE_DIR.child("templates"),),
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# Database URL comes from the environment; keep connections alive 10 minutes.
# (Fixed PEP 8 E251: no spaces around '=' in keyword arguments.)
DATABASES = {'default': dj_database_url.parse(env_var("MT_MYSQL_URL"), conn_max_age=600)}
DATABASES['default']['ATOMIC_REQUESTS'] = True
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
TIME_ZONE = 'America/Los_Angeles'
LANGUAGE_CODE = 'en-us'
USE_I18N = False
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
| rskwan/mt | mt/mt/settings/base.py | Python | apache-2.0 | 2,992 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest.test import attr
from tempest.test import services
class VolumesGetTest(base.BaseVolumeV1Test):
    """CRUD checks for volumes: create, get, update and delete."""

    _interface = "json"

    @classmethod
    def setUpClass(cls):
        super(VolumesGetTest, cls).setUpClass()
        cls.client = cls.volumes_client

    def _delete_volume(self, volume_id):
        """Delete a volume and wait until it is really gone."""
        resp, _ = self.client.delete_volume(volume_id)
        self.assertEqual(202, resp.status)
        self.client.wait_for_resource_deletion(volume_id)

    def _is_true(self, val):
        # NOTE(jdg): Temporary conversion method to get cinder patch
        # merged. Then we'll make this strict again and
        # specifically check "true" or "false"
        return val in ('true', 'True', True)

    def _volume_create_get_update_delete(self, **kwargs):
        # Create a volume, Get it's details and Delete the volume
        v_name = data_utils.rand_name('Volume')
        metadata = {'Type': 'Test'}
        # Create a volume
        resp, volume = self.client.create_volume(size=1,
                                                 display_name=v_name,
                                                 metadata=metadata,
                                                 **kwargs)
        self.assertEqual(200, resp.status)
        self.assertIn('id', volume)
        self.addCleanup(self._delete_volume, volume['id'])
        self.assertIn('display_name', volume)
        self.assertEqual(volume['display_name'], v_name,
                         "The created volume name is not equal "
                         "to the requested name")
        self.assertTrue(volume['id'] is not None,
                        "Field volume id is empty or not found.")
        self.client.wait_for_volume_status(volume['id'], 'available')
        # Get Volume information
        resp, fetched_volume = self.client.get_volume(volume['id'])
        self.assertEqual(200, resp.status)
        self.assertEqual(v_name,
                         fetched_volume['display_name'],
                         'The fetched Volume is different '
                         'from the created Volume')
        self.assertEqual(volume['id'],
                         fetched_volume['id'],
                         'The fetched Volume is different '
                         'from the created Volume')
        self.assertEqual(metadata,
                         fetched_volume['metadata'],
                         'The fetched Volume is different '
                         'from the created Volume')
        # NOTE(jdg): Revert back to strict true/false checking
        # after fix for bug #1227837 merges
        boot_flag = self._is_true(fetched_volume['bootable'])
        if 'imageRef' in kwargs:
            self.assertEqual(boot_flag, True)
        if 'imageRef' not in kwargs:
            self.assertEqual(boot_flag, False)
        # Update Volume
        new_v_name = data_utils.rand_name('new-Volume')
        new_desc = 'This is the new description of volume'
        resp, update_volume = \
            self.client.update_volume(volume['id'],
                                      display_name=new_v_name,
                                      display_description=new_desc)
        # Assert response body for update_volume method
        self.assertEqual(200, resp.status)
        self.assertEqual(new_v_name, update_volume['display_name'])
        self.assertEqual(new_desc, update_volume['display_description'])
        # Assert response body for get_volume method
        resp, updated_volume = self.client.get_volume(volume['id'])
        self.assertEqual(200, resp.status)
        self.assertEqual(volume['id'], updated_volume['id'])
        self.assertEqual(new_v_name, updated_volume['display_name'])
        self.assertEqual(new_desc, updated_volume['display_description'])
        self.assertEqual(metadata, updated_volume['metadata'])
        # NOTE(jdg): Revert back to strict true/false checking
        # after fix for bug #1227837 merges
        boot_flag = self._is_true(updated_volume['bootable'])
        if 'imageRef' in kwargs:
            self.assertEqual(boot_flag, True)
        if 'imageRef' not in kwargs:
            self.assertEqual(boot_flag, False)

    @attr(type='gate')
    def test_volume_get_metadata_none(self):
        # Create a volume without passing metadata, get details, and delete
        # Create a volume without metadata
        volume = self.create_volume(metadata={})
        # GET Volume
        resp, fetched_volume = self.client.get_volume(volume['id'])
        self.assertEqual(200, resp.status)
        self.assertEqual(fetched_volume['metadata'], {})

    @attr(type='smoke')
    def test_volume_create_get_update_delete(self):
        self._volume_create_get_update_delete()

    @attr(type='smoke')
    @services('image')
    def test_volume_create_get_update_delete_from_image(self):
        self._volume_create_get_update_delete(imageRef=self.
                                              config.compute.image_ref)

    @attr(type='gate')
    def test_volume_create_get_update_delete_as_clone(self):
        origin = self.create_volume()
        self._volume_create_get_update_delete(source_volid=origin['id'])
# Re-run the same test suite over the XML API surface.
class VolumesGetTestXML(VolumesGetTest):
    _interface = "xml"
| BeenzSyed/tempest | tempest/api/volume/test_volumes_get.py | Python | apache-2.0 | 6,033 |
import time
from aiohttp import web
import asyncio
import aiomcache
from aiohttp_session import setup, get_session
from aiohttp_session.memcached_storage import MemcachedStorage
async def handler(request: web.Request) -> web.Response:
    """Report when the client last visited, then record the current visit."""
    session = await get_session(request)
    # Session behaves like a dict; .get avoids the double lookup and
    # returns None on the first visit.
    last_visit = session.get('last_visit')
    session['last_visit'] = time.time()
    text = 'Last visited: {}'.format(last_visit)
    return web.Response(text=text)
async def make_app() -> web.Application:
    """Build the demo app with memcached-backed session storage."""
    app = web.Application()
    # NOTE(review): relies on the module-level `loop` assigned below before
    # this coroutine runs; the explicit `loop=` argument is deprecated in
    # newer aiomcache releases -- confirm before upgrading.
    mc = aiomcache.Client("127.0.0.1", 11211, loop=loop)
    setup(app, MemcachedStorage(mc))
    app.router.add_get('/', handler)
    return app
# Create the loop first so make_app() can hand it to the memcached client,
# then build the app on that loop and serve it.
loop = asyncio.get_event_loop()
app = loop.run_until_complete(make_app())
web.run_app(app)
| aio-libs/aiohttp_session | demo/memcached_storage.py | Python | apache-2.0 | 788 |
# -*- coding: utf-8 -*-
#
# Copyright 2014 - Intel.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlalchemy as sa
import uuid
from healing.db.sqlalchemy import model_base as mb
## Helpers
#TODO: i think oslo has uuid utils, replace here!
def _generate_unicode_uuid():
    # Python 2 code: coerce a random UUID4 into a `unicode` object so
    # SQLAlchemy stores it as a 36-char text primary key.
    return unicode(str(uuid.uuid4()))
def _id_column():
    # Shared factory for the 36-char UUID string primary-key column.
    column = sa.Column(
        sa.String(36),
        primary_key=True,
        default=_generate_unicode_uuid,
    )
    return column
class Action(mb.HealingBase):
    """Contains info about actions."""
    __tablename__ = 'actions'
    __table_args__ = (
        sa.UniqueConstraint('id'),
    )
    # UUID string primary key (see _id_column).
    id = _id_column()
    name = sa.Column(sa.String(80))
    # Lifecycle state; new rows start as 'init'.
    status = sa.Column(sa.String(20), nullable=True, default='init')
    action_meta = sa.Column(sa.String(200), nullable=True)
    target_id = sa.Column(sa.String(80))
    project_id = sa.Column(sa.String(80), nullable=True)
    request_id = sa.Column(sa.String(80), nullable=True)
    internal_data = sa.Column(sa.String(200), nullable=True)
    # Free-form action output; unbounded text.
    output = sa.Column(sa.Text(), nullable=True)
class SLAContract(mb.HealingBase):
    """Contains info about the SLA contracts."""
    __tablename__ = 'sla_contract'
    __table_args__ = (
        sa.UniqueConstraint('id'),
    )
    # UUID string primary key (see _id_column).
    id = _id_column()
    project_id = sa.Column(sa.String(80), nullable=True)
    type = sa.Column(sa.String(255), nullable=True)
    value = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    # Action to trigger when the contract is violated.
    action = sa.Column(sa.String(255), nullable=True)
    resource_id = sa.Column(sa.String(255), nullable=True)
    action_options = sa.Column(sa.String(255), nullable=True)
class AlarmTrack(mb.HealingBase):
    """Contains info about the ALARMs."""
    __tablename__ = 'alarm_track'
    __table_args__ = (
        sa.UniqueConstraint('id'),
    )
    # UUID string primary key (see _id_column).
    id = _id_column()
    # External alarm identifier (e.g. in the monitoring service).
    alarm_id = sa.Column(sa.String(80))
    # SLAContract this alarm belongs to.
    contract_id = sa.Column(sa.String(80))
    type = sa.Column(sa.String(100))
    meter = sa.Column(sa.String(100))
    threshold = sa.Column(sa.String(20))
    operator = sa.Column(sa.String(5))
    period = sa.Column(sa.Integer(), default=10)
    evaluation_period = sa.Column(sa.Integer(), default=1)
    name = sa.Column(sa.String(255))
    query = sa.Column(sa.String(255))
    statistic = sa.Column(sa.String(255))
    # if not tru SLA
    action = sa.Column(sa.String(255))
class FailureTrack(mb.HealingBase):
    """Records individual failure events raised by alarms."""
    __tablename__ = 'failure_track'
    __table_args__ = (
        sa.UniqueConstraint('id'),
    )
    # UUID string primary key (see _id_column).
    id = _id_column()
    alarm_id = sa.Column(sa.String(255))
    data = sa.Column(sa.String(255), nullable=True)
    contract_names = sa.Column(sa.String(255), nullable=True)
| lcostantino/healing-os | healing/db/sqlalchemy/models.py | Python | apache-2.0 | 3,311 |
import re
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import _lazy_re_compile, BaseValidator, URLValidator
class EnhancedURLValidator(URLValidator):
    """
    Extends Django's built-in URLValidator to permit the use of hostnames with no domain extension and enforce allowed
    schemes specified in the configuration.
    """
    # FQDN pattern assembled from Django's own host/domain/TLD fragments;
    # plain hostname_re is also accepted below to allow extension-less hosts.
    fqdn_re = URLValidator.hostname_re + URLValidator.domain_re + URLValidator.tld_re
    host_res = [URLValidator.ipv4_re, URLValidator.ipv6_re, fqdn_re, URLValidator.hostname_re]
    regex = _lazy_re_compile(
        r'^(?:[a-z0-9\.\-\+]*)://'          # Scheme (enforced separately)
        r'(?:\S+(?::\S*)?@)?'               # HTTP basic authentication
        r'(?:' + '|'.join(host_res) + ')'   # IPv4, IPv6, FQDN, or hostname
        r'(?::\d{2,5})?'                    # Port number
        r'(?:[/?#][^\s]*)?'                 # Path
        r'\Z', re.IGNORECASE)
    # Restrict accepted schemes to the configured whitelist.
    schemes = settings.ALLOWED_URL_SCHEMES
class ExclusionValidator(BaseValidator):
    """
    Ensure that a field's value is not equal to any of the specified values.
    """
    message = 'This value may not be %(show_value)s.'
    def compare(self, a, b):
        # BaseValidator raises ValidationError when compare() returns True,
        # so membership in the exclusion list `b` means validation fails.
        return a in b
def validate_regex(value):
    """
    Checks that the value is a valid regular expression. (Don't confuse this with RegexValidator, which *uses* a regex
    to validate a value.)
    """
    try:
        # Compilation is the authoritative validity check for a pattern.
        re.compile(value)
    except re.error:
        raise ValidationError(f"{value} is not a valid regular expression.")
| digitalocean/netbox | netbox/utilities/validators.py | Python | apache-2.0 | 1,580 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that computes statistics used to validate sparse features.
Currently, this module generates the following statistics for each
sparse feature:
- missing_value: Number of examples missing the value_feature.
- missing_index: A RankHistogram from index_name to the number of examples
missing the corresponding index_feature.
- min_length_diff: A RankHistogram from index_name to the minimum of
len(index_feature) - len(value_feature).
- max_length_diff: A RankHistogram from index_name to the maximum of
len(index_feature) - len(value_feature).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Iterable, List, Text, Tuple, Union
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.statistics.generators.constituents import count_missing_generator
from tensorflow_data_validation.statistics.generators.constituents import length_diff_generator
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple # pylint: disable=g-bad-import-order
# LINT.IfChange(custom_stat_names)
# Names of the custom statistics emitted per sparse feature; must stay in
# sync with the anomalies schema (see LINT.ThenChange below).
_MAX_LENGTH_DIFF_NAME = 'max_length_diff'
_MIN_LENGTH_DIFF_NAME = 'min_length_diff'
_MISSING_INDEX_NAME = 'missing_index'
_MISSING_VALUE_NAME = 'missing_value'
# LINT.ThenChange(../../anomalies/schema.cc:sparse_feature_custom_stat_names)
# Named tuple containing the FeaturePaths for the value and index features
# that comprise a given sparse feature.
_SparseFeatureComponents = tfx_namedtuple.namedtuple(
    '_SparseFeatureComponents', ['value_feature', 'index_features'])
def _get_all_sparse_features(
    schema: schema_pb2.Schema
) -> List[Tuple[types.FeaturePath, schema_pb2.SparseFeature]]:
  """Returns all sparse features in a schema."""

  def _collect(parent_path, container):
    """Gather (path, sparse_feature) pairs at and below `container`."""
    # Sparse features cannot themselves be parents (no struct_domain), so
    # each one found here is a leaf of the search.
    found = [(parent_path.child(sf.name), sf)
             for sf in container.sparse_feature]
    # Only STRUCT features can nest further sparse features.
    for feature in container.feature:
      if feature.type == schema_pb2.STRUCT:
        found.extend(
            _collect(parent_path.child(feature.name), feature.struct_domain))
    return found

  return _collect(types.FeaturePath([]), schema)
def _get_components(
    sparse_features: Iterable[Tuple[types.FeaturePath,
                                    schema_pb2.SparseFeature]]
) -> Dict[types.FeaturePath, _SparseFeatureComponents]:
  """Returns the index and value feature paths that comprise sparse features."""
  components = {}
  for path, feature in sparse_features:
    # The index and value features of a sparse feature share its parent path.
    parent_path = path.parent()
    index_features = {
        parent_path.child(index_feature.name)
        for index_feature in feature.index_feature
    }
    components[path] = _SparseFeatureComponents(
        value_feature=parent_path.child(feature.value_feature.name),
        index_features=index_features)
  return components
class SparseFeatureStatsGenerator(stats_generator.CompositeStatsGenerator):
  """Generates statistics for sparse features."""
  def __init__(self,
               schema: schema_pb2.Schema,
               name: Text = 'SparseFeatureStatsGenerator') -> None:
    """Initializes a sparse feature statistics generator.
    Args:
      schema: A required schema for the dataset.
      name: An optional unique name associated with the statistics generator.
    """
    self._sparse_feature_components = _get_components(
        _get_all_sparse_features(schema))
    # Create length diff generators for each index / value pair and count
    # missing generator for all paths.
    constituents = []
    for _, (value, indices) in self._sparse_feature_components.items():
      required_paths = [value] + list(indices)
      constituents.append(
          count_missing_generator.CountMissingGenerator(value, required_paths))
      for index in indices:
        constituents.append(
            length_diff_generator.LengthDiffGenerator(index, value,
                                                      required_paths))
        constituents.append(
            count_missing_generator.CountMissingGenerator(
                index, required_paths))
    super(SparseFeatureStatsGenerator, self).__init__(name, constituents,
                                                      schema)
  def extract_composite_output(self, accumulator):
    """Assembles per-sparse-feature custom stats from constituent outputs.

    The accumulator is keyed by each constituent generator's key(); results
    are folded into missing_value / missing_index / min- and max-length-diff
    custom statistics for every configured sparse feature.
    """
    stats = statistics_pb2.DatasetFeatureStatistics()
    for feature_path, (value,
                       indices) in self._sparse_feature_components.items():
      required_paths = [value] + list(indices)
      feature_stats = stats.features.add(path=feature_path.to_proto())
      feature_stats.custom_stats.add(
          name=_MISSING_VALUE_NAME,
          num=accumulator[count_missing_generator.CountMissingGenerator.key(
              value, required_paths)])
      index_features_num_missing_histogram = statistics_pb2.RankHistogram()
      max_length_diff_histogram = statistics_pb2.RankHistogram()
      min_length_diff_histogram = statistics_pb2.RankHistogram()
      # Sort for deterministic bucket order; label buckets by the index
      # feature's last path step.
      for index in sorted(indices):
        index_label = index.steps()[-1]
        missing_bucket = index_features_num_missing_histogram.buckets.add()
        missing_bucket.label = index_label
        missing_bucket.sample_count = accumulator[
            count_missing_generator.CountMissingGenerator.key(
                index, required_paths)]
        min_diff, max_diff = accumulator[
            length_diff_generator.LengthDiffGenerator.key(
                index, value, required_paths)]
        max_length_bucket = max_length_diff_histogram.buckets.add()
        max_length_bucket.label = index_label
        max_length_bucket.sample_count = max_diff
        min_length_bucket = min_length_diff_histogram.buckets.add()
        min_length_bucket.label = index_label
        min_length_bucket.sample_count = min_diff
      feature_stats.custom_stats.add(
          name=_MISSING_INDEX_NAME,
          rank_histogram=index_features_num_missing_histogram)
      feature_stats.custom_stats.add(
          name=_MAX_LENGTH_DIFF_NAME, rank_histogram=max_length_diff_histogram)
      feature_stats.custom_stats.add(
          name=_MIN_LENGTH_DIFF_NAME, rank_histogram=min_length_diff_histogram)
    return stats
| tensorflow/data-validation | tensorflow_data_validation/statistics/generators/sparse_feature_stats_generator.py | Python | apache-2.0 | 7,813 |
#!/usr/bin/env python3
import sys
try:
import __builtin__
except ImportError:
import builtins as __builtin__
import os
# python puts the program's directory path in sys.path[0]. In other words, the user ordinarily has no way
# to override python's choice of a module from its own dir. We want to have that ability in our environment.
# However, we don't want to break any established python modules that depend on this behavior. So, we'll
# save the value from sys.path[0], delete it, import our modules and then restore sys.path to its original
# value.
save_path_0 = sys.path[0]
del sys.path[0]
from gen_print import *
from gen_arg import *
from gen_plug_in import *
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
# Create parser object to process command line parameters and args.
# Create parser object.
# prefix_chars='-+' lets callers use both '-opt' and '+opt' style options.
parser = argparse.ArgumentParser(
    usage='%(prog)s [OPTIONS] [PLUG_IN_DIR_PATHS]',
    description="%(prog)s will validate the plug-in packages passed to it."
    + " It will also print a list of the absolute plug-in"
    + " directory paths for use by the calling program.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    prefix_chars='-+')
# Create arguments.
parser.add_argument(
    'plug_in_dir_paths',
    nargs='?',
    default="",
    help=plug_in_dir_paths_help_text + default_string)
parser.add_argument(
    '--mch_class',
    default="obmc",
    help=mch_class_help_text + default_string)
# The stock_list will be passed to gen_get_options. We populate it with the names of stock parm options we
# want. These stock parms are pre-defined by gen_get_options.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
def exit_function(signal_number=0,
                  frame=None):
    r"""
    Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).

    Description of argument(s):
    signal_number  The signal number that triggered the exit (0 = normal exit).
    frame          The stack frame active when the signal was received.
    """
    dprint_executing()
    dprint_var(signal_number)
    # Emit the standard program footer (timing, exit info) unless quiet.
    qprint_pgm_footer()
def signal_handler(signal_number, frame):
    r"""
    Handle signals. Without a function to catch a SIGTERM or SIGINT, our program would terminate immediately
    with return code 143 and without calling our exit_function.
    """
    # Our convention is to set up exit_function with atexit.registr() so there is no need to explicitly call
    # exit_function from here.
    dprint_executing()
    # Calling exit prevents us from returning to the code that was running when we received the signal.
    exit(0)
def validate_parms():
    r"""
    Validate program parameters, etc. Return True or False accordingly.
    """
    # Registers exit_function/signal_handler via the gen_arg framework.
    gen_post_validation(exit_function, signal_handler)
    return True
def main():
    r"""
    This is the "main" function. The advantage of having this function vs just doing this in the true
    mainline is that you can:
    - Declare local variables
    - Use "return" instead of "exit".
    - Indent 4 chars like you would in any function.
    This makes coding more consistent, i.e. it's easy to move code from here into a function and vice versa.
    """
    if not gen_get_options(parser, stock_list):
        return False
    if not validate_parms():
        return False
    qprint_pgm_header()
    # Access program parameter globals.
    # (Set by gen_get_options from the parsed command line.)
    global plug_in_dir_paths
    global mch_class
    plug_in_packages_list = return_plug_in_packages_list(plug_in_dir_paths,
                                                         mch_class)
    qprint_var(plug_in_packages_list)
    # As stated in the help text, this program must print the full paths of each selected plug in.
    for plug_in_dir_path in plug_in_packages_list:
        print(plug_in_dir_path)
    return True
# Main
# A falsey return from main() signals failure to the calling shell.
if not main():
    exit(1)
| openbmc/openbmc-test-automation | bin/validate_plug_ins.py | Python | apache-2.0 | 3,734 |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import numbers
from torch.distributions.transforms import Transform
from pyro.ops.tensor_utils import safe_normalize
from .. import constraints
class Normalize(Transform):
"""
Safely project a vector onto the sphere wrt the ``p`` norm. This avoids
the singularity at zero by mapping to the vector ``[1, 0, 0, ..., 0]``.
"""
domain = constraints.real_vector
codomain = constraints.sphere
bijective = False
def __init__(self, p=2, cache_size=0):
assert isinstance(p, numbers.Number)
assert p >= 0
self.p = p
super().__init__(cache_size=cache_size)
def __eq__(self, other):
return type(self) == type(other) and self.p == other.p
def _call(self, x):
return safe_normalize(x, p=self.p)
def _inverse(self, y):
return y
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return Normalize(self.p, cache_size=cache_size)
| uber/pyro | pyro/distributions/transforms/normalize.py | Python | apache-2.0 | 1,072 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2012-2-5
@author: zepheir
'''
import sys
sys.path.append('/app/srv/src')
from binascii import b2a_hex
# Best effort: prefer the epoll reactor where available; fall back to the
# default reactor otherwise (non-Linux, or a reactor already installed).
# `except Exception` (not a bare except) so SystemExit/KeyboardInterrupt
# are not swallowed.
try:
    from twisted.internet import epollreactor
    epollreactor.install()
except Exception:
    pass
from twisted.internet import reactor
from twisted.python import log
from twisted.application import service
from zhyDB import ZhyDB
import Zoro
from ussop import sipai as Sipai
import time
import config
from config import *
# Debug helper: dump whatever the modbus layer hands back (Python 2 print).
def ReceiveData(*data):
    if DEBUG: print 'print data----------------', data
# Constants
# ZDB = SipaiDB()
zhy = ZhyDB()
# NOTE(review): appears to map (ip, port) tuples to lists of module dicts
# with 'type' and 'addr' keys (see SampleServer usage) -- confirm.
SipaiModsDict = zhy.listSipaiMods(allSDS=None)
# factoryDict = {}
# modules = {}
class SampleServer(object):
    """docstring for SampleServer"""
    def __init__(self, *sds):
        super(SampleServer, self).__init__()
        # sds is expected as (host, port); port arrives as a string.
        self.sds = sds
        self.host,self.port = self.sds[0], int(self.sds[1])
        self.modules = []
        self.mod = object
        self.nowtype=''
        # Modbus connection factory; spendtime is the per-command pacing delay.
        self.factory = Zoro.SetupModbusConnect(self.host, self.port, self.ReceiveData, reConnectMode=False)
        self.factory.spendtime = 0.3
        self.setup()
    def setup(self):
        # (Re)load the module list for this SDS from the global dict.
        self.modules += SipaiModsDict[self.sds]
        self.sampletimer = SipaiSampleTimer
        if ECHO: print "*********** Time pass from start: %s"%(time.ctime()), self.factory.connection.getDestination(),self.factory.getState()
    def ReceiveData(self, *data):
        # Callback invoked by the modbus factory with (addr, (ip, port), payload).
        if DEBUG: print ' ===> Received Data:', data, b2a_hex(data[2])
        # global zhy
        _result = self.mod.dealdata(data[2])
        print '----------result---------',_result
        print data[0],data[1],zhy.updateSipaiResults(
            ip=data[1][0],
            port=data[1][1],
            addr=data[0],
            type=self.nowtype,
            # value=b2a_hex(data[2])
            value=_result
        )
    def update(self):
        # Drain one module per tick; reschedule until the list is empty,
        # then optionally disconnect/reconnect and restart the cycle.
        if DEBUG: print "[",self.sds,"] starting in the SampleServer Class!"
        if len(self.modules)>0:
            modinfo=self.modules.pop(0)
            self.nowtype = modinfo['type']
            self.mod = Sipai.createspm(type=modinfo['type'], address=modinfo['addr'])
            _cmd = self.mod.cmd(self.mod.CMD_READDATA)
            zhy.setSipaiModState(
                ip=self.host,
                port=str(self.port),
                addr=modinfo['addr'],
                type=self.nowtype,
                state='reading'
            )
            if DEBUG: print "===> Output command:",b2a_hex(_cmd)
            reactor.callLater(0.1, self.factory.protocol.SendCmd, _cmd)
            reactor.callLater(self.factory.spendtime, self.update)
            # Count down the remaining time budget for this sampling round.
            self.sampletimer-=self.factory.spendtime
        else:
            if SERVERRECONNECT:
                reactor.callLater(self.factory.spendtime, self.factory.connection.disconnect)
                reactor.callLater(SdsConnectTimer,self.factory.connection.connect)
            reactor.callLater(SdsConnectTimer,self.setup)
            reactor.callLater(self.sampletimer-SdsConnectTimer, self.update)
            # reactor.callLater(SdsConnectTimer+self.factory.spendtime, self.update)
# One SampleServer per configured SDS, keyed by its (ip, port) tuple.
servs ={}
def main():
    # Spin up a SampleServer for every SDS found in the database and kick
    # off its sampling loop on the reactor.
    for sds in SipaiModsDict:
        servs[sds]=SampleServer(sds[0],sds[1])
        servs[sds].update()
        # time.sleep(0.2)
# Run standalone under the stock reactor, or under twistd where the module
# is executed with __name__ == "__builtin__" (Python 2 .tac-style loading).
if __name__ == '__main__':
    import sys
    main()
    reactor.run()
    print 'reactor stopped!'
    sys.exit(1)
elif __name__ =="__builtin__":
    import sys
    main()
    application = service.Application("SIPAI")
| zepheir/pySrv_sipai | srv/src/sipaiSampleServer.py | Python | apache-2.0 | 4,049 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools
from pants.backend.jvm.tasks.jvm_compile.java.jmake_analysis import JMakeAnalysis
from pants.backend.jvm.tasks.jvm_compile.java.jmake_analysis_parser import JMakeAnalysisParser
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.util.dirutil import relativize_paths, safe_mkdir
# From http://kenai.com/projects/jmake/sources/mercurial/content
# /src/com/sun/tools/jmake/Main.java?rev=26
# Main.mainExternal docs.
_JMAKE_ERROR_CODES = {
-1: 'invalid command line option detected',
-2: 'error reading command file',
-3: 'project database corrupted',
-4: 'error initializing or calling the compiler',
-5: 'compilation error',
-6: 'error parsing a class file',
-7: 'file not found',
-8: 'I/O exception',
-9: 'internal jmake exception',
-10: 'deduced and actual class name mismatch',
-11: 'invalid source file extension',
-12: 'a class in a JAR is found dependent on a class with the .java source',
-13: 'more than one entry for the same class is found in the project',
-20: 'internal Java error (caused by java.lang.InternalError)',
-30: 'internal Java error (caused by java.lang.RuntimeException).'
}
# When executed via a subprocess return codes will be treated as unsigned
_JMAKE_ERROR_CODES.update((256 + code, msg) for code, msg in _JMAKE_ERROR_CODES.items())
class JmakeCompile(JvmCompile):
  """Compile Java code using JMake.

  JMake is an incremental Java build tool; this task shells out to its
  ``org.pantsbuild.jmake.Main`` entry point, passing javac options prefixed
  with ``-C`` so JMake forwards them to the underlying compiler.
  """
  _name = 'java'
  _file_suffix = '.java'
  _supports_concurrent_execution = False
  _JMAKE_MAIN = 'org.pantsbuild.jmake.Main'

  @classmethod
  def get_args_default(cls, bootstrap_option_values):
    """Return the default jmake/javac arguments for this task.

    The ``-C`` prefix marks arguments that jmake passes through to javac.
    """
    workdir_gen = os.path.relpath(os.path.join(bootstrap_option_values.pants_workdir, 'gen'),
                                  get_buildroot())
    return ('-C-encoding', '-CUTF-8', '-C-g', '-C-Tcolor',
            # Don't warn for generated code.
            '-C-Tnowarnprefixes',
            '-C{0}'.format(workdir_gen),
            # Suppress warning for annotations with no processor - we know there are many of these!
            '-C-Tnowarnregex', '-C^(warning: )?No processor claimed any of these annotations: .*')

  @classmethod
  def get_warning_args_default(cls):
    """Default javac lint flags used when warnings are enabled."""
    return ('-C-Xlint:all', '-C-Xlint:-serial', '-C-Xlint:-path', '-C-deprecation')

  @classmethod
  def get_no_warning_args_default(cls):
    """Default javac flags used when warnings are disabled."""
    return ('-C-Xlint:none', '-C-nowarn')

  @classmethod
  def register_options(cls, register):
    """Register task options and the jmake/compiler bootstrap tools."""
    super(JmakeCompile, cls).register_options(register)
    register('--use-jmake', advanced=True, action='store_true', default=True,
             fingerprint=True,
             help='Use jmake to compile Java targets')
    register('--source', advanced=True, fingerprint=True,
             help='Provide source compatibility with this release. Overrides the jvm platform '
                  'source.',
             deprecated_hint='The -source arg to javac should be specified by the jvm-platform.',
             deprecated_version='0.0.44')
    register('--target', advanced=True, fingerprint=True,
             help='Generate class files for this JVM version. Overrides the jvm platform target.',
             deprecated_hint='The -target arg to javac should be specified by the jvm-platform.',
             deprecated_version='0.0.44')
    cls.register_jvm_tool(register, 'jmake')
    cls.register_jvm_tool(register, 'java-compiler')

  def select(self, target):
    """Only handle targets when --use-jmake is set (plus the base criteria)."""
    return self.get_options().use_jmake and super(JmakeCompile, self).select(target)

  def __init__(self, *args, **kwargs):
    super(JmakeCompile, self).__init__(*args, **kwargs)
    self.set_distribution(jdk=True)
    self._buildroot = get_buildroot()
    # The depfile is generated by org.pantsbuild.tools.compiler.Compiler
    # and includes information about package-private classes -- e.g.
    # the case where Foo.java also defines class Bar. This allows jmake
    # to correctly include these files in its analysis.
    self._depfile_folder = os.path.join(self.workdir, 'jmake-depfiles')

  @property
  def _depfile(self):
    """Path of the shared depfile; ensures its folder exists on access."""
    safe_mkdir(self._depfile_folder)
    return os.path.join(self._depfile_folder, 'global_depfile')

  def create_analysis_tools(self):
    """Build the analysis toolchain used for incremental-compile bookkeeping."""
    return AnalysisTools(self.context.java_home, JMakeAnalysisParser(), JMakeAnalysis)

  def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
              log_file, settings):
    """Invoke jmake on ``sources`` and raise TaskError on failure.

    Assembles the jmake command line (classpath, output dir, analysis
    database, pass-through javac flags, source/target levels) and maps
    jmake's numeric exit codes to human-readable errors.
    """
    relative_classpath = relativize_paths(classpath, self._buildroot)
    jmake_classpath = self.tool_classpath('jmake')
    args = [
      '-classpath', ':'.join(relative_classpath),
      '-d', classes_output_dir,
      '-pdb', analysis_file,
      '-pdb-text-format',
      ]
    # TODO: This file should always exist for modern jmake installs; this check should
    # be removed via a Task-level identity bump after:
    # https://github.com/pantsbuild/pants/issues/1351
    if os.path.exists(self._depfile):
      args.extend(['-depfile', self._depfile])
    compiler_classpath = self.tool_classpath('java-compiler')
    args.extend([
      '-jcpath', ':'.join(compiler_classpath),
      '-jcmainclass', 'org.pantsbuild.tools.compiler.Compiler',
      ])
    # Strip the color flag added by get_args_default when colors are off.
    if not self.get_options().colors:
      filtered_args = filter(lambda arg: not arg == '-C-Tcolor', self._args)
    else:
      filtered_args = self._args
    args.extend(filtered_args)
    args.extend(settings.args)
    # Source/target levels must come from options or the jvm platform, never
    # from raw pass-through args, so reject them explicitly.
    if '-C-source' in args:
      raise TaskError("Set the source Java version with the 'source' or with the jvm platform, not "
                      "in 'args'.")
    if '-C-target' in args:
      raise TaskError("Set the target JVM version with the 'target' option or with the jvm "
                      "platform, not in 'args'.")
    if self.get_options().source or self.get_options().target:
      self.context.log.warn('--compile-java-source and --compile-java-target trample and override '
                            'target jvm platform settings, and probably should not be used except '
                            'for testing.')
    source_level = self.get_options().source or settings.source_level
    target_level = self.get_options().target or settings.target_level
    if source_level:
      args.extend(['-C-source', '-C{0}'.format(source_level)])
    if target_level:
      args.extend(['-C-target', '-C{0}'.format(target_level)])
    args.append('-C-Tdependencyfile')
    args.append('-C{}'.format(self._depfile))
    jvm_options = list(self._jvm_options)
    args.extend(sources)
    result = self.runjava(classpath=jmake_classpath,
                          main=JmakeCompile._JMAKE_MAIN,
                          jvm_options=jvm_options,
                          args=args,
                          workunit_name='jmake',
                          workunit_labels=[WorkUnitLabel.COMPILER])
    if result:
      default_message = 'Unexpected error - JMake returned {}'.format(result)
      raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))
| sid-kap/pants | src/python/pants/backend/jvm/tasks/jvm_compile/java/java_compile.py | Python | apache-2.0 | 7,482 |
import logging
import os
import shutil
import subprocess
DEVNULL = open(os.devnull, 'wb')
class ShellError(Exception):
    """Raised when an external shell command exits with a non-zero status.

    Attributes:
        command: the command line that failed (string form).
        errno: the process exit code.
        message: optional captured stderr or explanatory text.
    """

    def __init__(self, command, err_no, message=None):
        self.command = command
        self.errno = err_no
        self.message = message

    def __str__(self):
        text = "Command '%s' failed with exit code %d" % (self.command, self.errno)
        if self.message is None:
            return text
        return text + ': ' + repr(self.message)

    def __repr__(self):
        # Same human-readable form as __str__.
        return str(self)
def shell_exec(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, background=False, env=None):
    """Run a command, either synchronously or in the background.

    Args:
        cmd: a string (run through the shell) or a list of argv tokens.
        stdin: a file object, or a string to feed to the process's stdin.
        stdout, stderr: destinations; default PIPE captures the output.
        background: if True, return the Popen object immediately.
        env: optional environment mapping for the child process.

    Returns:
        The Popen object when background=True, otherwise a
        (stdout, stderr) tuple of decoded output (or None).

    Raises:
        ShellError: when the command exits non-zero (foreground only).
    """
    # String commands go through the shell; list commands are logged joined.
    str_cmd = cmd if isinstance(cmd, str) else ' '.join(cmd)
    logging.getLogger('shell_exec').debug(str_cmd)
    message = None
    if background:
        # Background processes must not block on full pipes, so default
        # captured streams are redirected to /dev/null instead.
        if stdout == subprocess.PIPE:
            stdout = DEVNULL
        if stderr == subprocess.PIPE:
            stderr = DEVNULL
    elif stdin is not None and isinstance(stdin, str):
        # A string stdin becomes data piped to the process.
        # NOTE(review): communicate() below is handed this str while the
        # pipes are opened in binary mode -- on Python 3 this looks like it
        # would need bytes; confirm intended Python version / callers.
        message = stdin
        stdin = subprocess.PIPE
    process = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr, shell=isinstance(cmd, str), env=env)
    stdout_dump = None
    stderr_dump = None
    return_code = 0
    if message is not None or stdout == subprocess.PIPE or stderr == subprocess.PIPE:
        # Feed stdin (if any) and drain the pipes to avoid deadlock.
        stdout_dump, stderr_dump = process.communicate(message)
        return_code = process.returncode
    elif not background:
        return_code = process.wait()
    if background:
        return process
    else:
        if stdout_dump is not None:
            stdout_dump = stdout_dump.decode('utf-8')
        if stderr_dump is not None:
            stderr_dump = stderr_dump.decode('utf-8')
        if return_code != 0:
            raise ShellError(str_cmd, return_code, stderr_dump)
        else:
            return stdout_dump, stderr_dump
def mem_size(megabytes=True):
    """Return the total physical memory of this machine.

    Args:
        megabytes: when True (default) return megabytes, else raw bytes.
    """
    total_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
    if megabytes:
        return total_bytes / (1024. ** 2)
    return total_bytes
def lc(filename):
    """Return the number of lines in *filename* (0 for an empty file)."""
    with open(filename) as stream:
        return sum(1 for _ in stream)
def cat(files, output, buffer_size=10 * 1024 * 1024):
    """Concatenate *files* into *output*, copying in fixed-size chunks.

    Args:
        files: iterable of input file paths, concatenated in order.
        output: destination path, truncated and written in binary mode.
        buffer_size: copy chunk size in bytes (default 10 MiB).
    """
    with open(output, 'wb') as merged:
        for part in files:
            with open(part, 'rb') as source:
                while True:
                    chunk = source.read(buffer_size)
                    if not chunk:
                        break
                    merged.write(chunk)
| ModernMT/MMT | cli/utils/osutils.py | Python | apache-2.0 | 2,402 |
"""
Support for Huawei LTE routers.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/huawei_lte/
"""
from datetime import timedelta
from functools import reduce
import logging
import operator
import voluptuous as vol
import attr
from homeassistant.const import (
CONF_URL, CONF_USERNAME, CONF_PASSWORD, EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['huawei-lte-api==1.0.12']
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
DOMAIN = 'huawei_lte'
DATA_KEY = 'huawei_lte'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_URL): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})])
}, extra=vol.ALLOW_EXTRA)
@attr.s
class RouterData:
    """Class for router state.

    Holds the API client plus the most recently fetched snapshots of the
    router's data, refreshed (rate-limited) by update().
    """

    # Authorized huawei_lte_api Client used for all API calls.
    client = attr.ib()
    # Cached API responses, populated by update(); empty dicts until then.
    device_information = attr.ib(init=False, factory=dict)
    device_signal = attr.ib(init=False, factory=dict)
    traffic_statistics = attr.ib(init=False, factory=dict)
    wlan_host_list = attr.ib(init=False, factory=dict)

    def __getitem__(self, path: str):
        """
        Get value corresponding to a dotted path.

        The first path component designates a member of this class
        such as device_information, device_signal etc, and the remaining
        path points to a value in the member's data structure.
        """
        root, *rest = path.split(".")
        try:
            data = getattr(self, root)
        except AttributeError as err:
            # Present missing members as KeyError, consistent with mapping use.
            raise KeyError from err
        # Walk the remaining components down into the nested dict.
        return reduce(operator.getitem, rest, data)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self) -> None:
        """Call API to update data.

        Throttled so polling cannot hit the router more often than
        MIN_TIME_BETWEEN_UPDATES.
        """
        self.device_information = self.client.device.information()
        _LOGGER.debug("device_information=%s", self.device_information)
        self.device_signal = self.client.device.signal()
        _LOGGER.debug("device_signal=%s", self.device_signal)
        self.traffic_statistics = self.client.monitoring.traffic_statistics()
        _LOGGER.debug("traffic_statistics=%s", self.traffic_statistics)
        self.wlan_host_list = self.client.wlan.host_list()
        _LOGGER.debug("wlan_host_list=%s", self.wlan_host_list)
@attr.s
class HuaweiLteData:
    """Shared state.

    Stored in hass.data; maps each configured router URL to its RouterData.
    """

    # Mapping of router URL -> RouterData.
    data = attr.ib(init=False, factory=dict)

    def get_data(self, config):
        """Get the requested or the only data value.

        If the config names a URL, return that router's data; otherwise,
        when exactly one router is configured, return it. Returns None
        when the router cannot be determined unambiguously.
        """
        if CONF_URL in config:
            return self.data.get(config[CONF_URL])
        if len(self.data) == 1:
            return next(iter(self.data.values()))
        return None
def setup(hass, config) -> bool:
    """Set up Huawei LTE component.

    Creates the shared HuaweiLteData container once, then initializes one
    router connection per configured entry. Always returns True.
    """
    if DATA_KEY not in hass.data:
        hass.data[DATA_KEY] = HuaweiLteData()
    for conf in config.get(DOMAIN, []):
        _setup_lte(hass, conf)
    return True
def _setup_lte(hass, lte_config) -> None:
    """Set up Huawei LTE router.

    Logs in to the router, performs an initial data fetch, registers the
    RouterData under the router URL, and arranges a logout on shutdown.
    """
    from huawei_lte_api.AuthorizedConnection import AuthorizedConnection
    from huawei_lte_api.Client import Client

    url = lte_config[CONF_URL]
    username = lte_config[CONF_USERNAME]
    password = lte_config[CONF_PASSWORD]

    # Logs in immediately with the configured credentials.
    connection = AuthorizedConnection(
        url,
        username=username,
        password=password,
    )
    client = Client(connection)

    data = RouterData(client)
    data.update()
    hass.data[DATA_KEY].data[url] = data

    def cleanup(event):
        """Clean up resources."""
        # Log out of the router session when Home Assistant stops.
        client.user.logout()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
| persandstrom/home-assistant | homeassistant/components/huawei_lte.py | Python | apache-2.0 | 3,730 |
#!/usr/bin/env python
"""Conditional import for Chipsec. Only Linux is supported at this stage."""
| google/grr | grr/client/grr_response_client/components/chipsec_support/actions/__init__.py | Python | apache-2.0 | 99 |
# Copyright 2016 MongoDB Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prefer the primary demangler package; fall back to the cppfilt-based
# implementation when it is not installed.  Catching only ImportError
# (instead of a bare ``except``) avoids masking unrelated errors raised
# while importing ``demangler`` (e.g. syntax errors or runtime failures
# inside that module).
try:
    from demangler import demangle
except ImportError:
    from cppfilt import demangle
| mongodb-labs/disasm | app/disasm_demangler.py | Python | apache-2.0 | 654 |
class SessionHelper:
    """Helpers for logging in to and out of the application under test."""

    def __init__(self, app):
        # app: the application fixture exposing the webdriver (app.wd)
        # and page-navigation helpers.
        self.app = app

    def login(self, username, password):
        """Open the group home page and log in with the given credentials."""
        self.app.open_home_page_group()
        self._submit_login_form(username, password)

    def logincon(self, username, password):
        """Open the main home page and log in with the given credentials."""
        self.app.open_home_page()
        self._submit_login_form(username, password)

    def _submit_login_form(self, username, password):
        """Fill in and submit the login form (shared by login/logincon)."""
        wd = self.app.wd
        wd.find_element_by_name("user").click()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys(username)
        wd.find_element_by_id("LoginForm").click()
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()

    def logout(self):
        """Log out by clicking the Logout link."""
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()
# Name: tools.py
# Purpose: XRC editor, toolbar
# Author: Roman Rolinsky <rolinsky@mema.ucl.ac.be>
# Created: 19.03.2003
# RCS-ID: $Id: tools.py,v 1.12 2006/05/17 03:57:57 RD Exp $
from xxx import * # xxx imports globals and params
from tree import ID_NEW
# Icons
import images
# Groups of controls
GROUPNUM = 4
GROUP_WINDOWS, GROUP_MENUS, GROUP_SIZERS, GROUP_CONTROLS = range(GROUPNUM)
# States depending on current selection and Control/Shift keys
STATE_ROOT, STATE_MENUBAR, STATE_TOOLBAR, STATE_MENU, STATE_STDDLGBTN, STATE_ELSE = range(6)
# Left toolbar for GUI elements
class Tools(wx.Panel):
    """Left-hand toolbar panel offering one button per creatable XRC element.

    Buttons are organized into titled groups (windows, menus, sizers,
    controls, custom) and enabled/disabled according to the current tree
    selection and the Ctrl/Shift modifier state.
    """

    # Pixel size of each tool button.
    TOOL_SIZE = (30, 30)

    def __init__(self, parent):
        """Build all button groups and hook up key/command events."""
        if wx.Platform == '__WXGTK__':
            wx.Panel.__init__(self, parent, -1,
                              style=wx.RAISED_BORDER|wx.WANTS_CHARS)
        else:
            wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS)
        # Create sizer for groups
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        # Data to create buttons
        self.groups = []
        self.ctrl = self.shift = False
        # Current state (what to enable/disable)
        self.state = None
        # Static description of every group: a title followed by
        # (creation-command id, bitmap) pairs.
        groups = [
            ["Windows",
             (ID_NEW.FRAME, images.getToolFrameBitmap()),
             (ID_NEW.DIALOG, images.getToolDialogBitmap()),
             (ID_NEW.PANEL, images.getToolPanelBitmap())],
            ["Menus",
             (ID_NEW.TOOL_BAR, images.getToolToolBarBitmap()),
             (ID_NEW.MENU_BAR, images.getToolMenuBarBitmap()),
             (ID_NEW.MENU, images.getToolMenuBitmap()),
             (ID_NEW.TOOL, images.getToolToolBitmap()),
             (ID_NEW.MENU_ITEM, images.getToolMenuItemBitmap()),
             (ID_NEW.SEPARATOR, images.getToolSeparatorBitmap())],
            ["Sizers",
             (ID_NEW.BOX_SIZER, images.getToolBoxSizerBitmap()),
             (ID_NEW.STATIC_BOX_SIZER, images.getToolStaticBoxSizerBitmap()),
             (ID_NEW.GRID_SIZER, images.getToolGridSizerBitmap()),
             (ID_NEW.FLEX_GRID_SIZER, images.getToolFlexGridSizerBitmap()),
             (ID_NEW.GRID_BAG_SIZER, images.getToolGridBagSizerBitmap()),
             (ID_NEW.SPACER, images.getToolSpacerBitmap())],
            ["Controls",
             (ID_NEW.STATIC_TEXT, images.getToolStaticTextBitmap()),
             (ID_NEW.STATIC_BITMAP, images.getToolStaticBitmapBitmap()),
             (ID_NEW.STATIC_LINE, images.getToolStaticLineBitmap()),
             (ID_NEW.BUTTON, images.getToolButtonBitmap()),
             (ID_NEW.BITMAP_BUTTON, images.getToolBitmapButtonBitmap()),
             (ID_NEW.STATIC_BOX, images.getToolStaticBoxBitmap()),
             (ID_NEW.TEXT_CTRL, images.getToolTextCtrlBitmap()),
             (ID_NEW.COMBO_BOX, images.getToolComboBoxBitmap()),
             (ID_NEW.CHOICE, images.getToolChoiceBitmap()),
             (ID_NEW.RADIO_BUTTON, images.getToolRadioButtonBitmap()),
             (ID_NEW.CHECK_BOX, images.getToolCheckBoxBitmap()),
             (ID_NEW.RADIO_BOX, images.getToolRadioBoxBitmap()),
             (ID_NEW.SPIN_CTRL, images.getToolSpinCtrlBitmap()),
             (ID_NEW.SPIN_BUTTON, images.getToolSpinButtonBitmap()),
             (ID_NEW.SCROLL_BAR, images.getToolScrollBarBitmap()),
             (ID_NEW.SLIDER, images.getToolSliderBitmap()),
             (ID_NEW.GAUGE, images.getToolGaugeBitmap()),
             (ID_NEW.TREE_CTRL, images.getToolTreeCtrlBitmap()),
             (ID_NEW.LIST_BOX, images.getToolListBoxBitmap()),
             (ID_NEW.CHECK_LIST, images.getToolCheckListBitmap()),
             (ID_NEW.LIST_CTRL, images.getToolListCtrlBitmap()),
             (ID_NEW.NOTEBOOK, images.getToolNotebookBitmap()),
             (ID_NEW.SPLITTER_WINDOW, images.getToolSplitterWindowBitmap()),
             (ID_NEW.UNKNOWN, images.getToolUnknownBitmap())]
            ]
        # User-registered custom components get their own group.
        from tree import customCreateMap
        if customCreateMap:
            customGroup=['Custom']
            for id in customCreateMap:
                customGroup.append( (id, images.getToolUnknownBitmap()))
            groups.append(customGroup)
        for grp in groups:
            self.AddGroup(grp[0])
            for b in grp[1:]:
                self.AddButton(b[0], b[1], g.pullDownMenu.createMap[b[0]])
        self.SetAutoLayout(True)
        self.SetSizerAndFit(self.sizer)
        # Allow to be resized in vertical direction only
        self.SetSizeHints(self.GetSize()[0], -1)
        # Events
        wx.EVT_COMMAND_RANGE(self, ID_NEW.PANEL, ID_NEW.LAST,
                             wx.wxEVT_COMMAND_BUTTON_CLICKED, g.frame.OnCreate)
        wx.EVT_KEY_DOWN(self, self.OnKeyDown)
        wx.EVT_KEY_UP(self, self.OnKeyUp)

    def AddButton(self, id, image, text):
        """Add one bitmap button to the current group's grid sizer."""
        from wx.lib import buttons
        button = buttons.GenBitmapButton(self, id, image, size=self.TOOL_SIZE,
                                         style=wx.NO_BORDER|wx.WANTS_CHARS)
        button.SetBezelWidth(0)
        wx.EVT_KEY_DOWN(button, self.OnKeyDown)
        wx.EVT_KEY_UP(button, self.OnKeyUp)
        button.SetToolTipString(text)
        self.curSizer.Add(button)
        # Register the button under its id in the last group's dict.
        self.groups[-1][1][id] = button

    def AddGroup(self, name):
        """Start a new titled group; later AddButton calls fill it."""
        # Each group is inside box
        box = wx.StaticBox(self, -1, name, style=wx.WANTS_CHARS)
        box.SetFont(g.smallerFont())
        boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)
        boxSizer.Add((0, 4))
        self.curSizer = wx.GridSizer(0, 3)
        boxSizer.Add(self.curSizer)
        self.sizer.Add(boxSizer, 0, wx.TOP | wx.LEFT | wx.RIGHT, 4)
        self.groups.append((box,{}))

    # Enable/disable group
    def EnableGroup(self, gnum, enable = True):
        """Enable or disable a whole group (its box and every button)."""
        grp = self.groups[gnum]
        grp[0].Enable(enable)
        for b in grp[1].values(): b.Enable(enable)

    # Enable/disable group item
    def EnableGroupItem(self, gnum, id, enable = True):
        """Enable or disable a single button within a group."""
        grp = self.groups[gnum]
        grp[1][id].Enable(enable)

    # Enable/disable group items
    def EnableGroupItems(self, gnum, ids, enable = True):
        """Enable or disable several buttons within a group."""
        grp = self.groups[gnum]
        for id in ids:
            grp[1][id].Enable(enable)

    # Process key events
    def OnKeyDown(self, evt):
        """Track Ctrl/Shift press state on the global tree and refresh."""
        if evt.GetKeyCode() == wx.WXK_CONTROL:
            g.tree.ctrl = True
        elif evt.GetKeyCode() == wx.WXK_SHIFT:
            g.tree.shift = True
        self.UpdateIfNeeded()
        evt.Skip()

    def OnKeyUp(self, evt):
        """Track Ctrl/Shift release state on the global tree and refresh."""
        if evt.GetKeyCode() == wx.WXK_CONTROL:
            g.tree.ctrl = False
        elif evt.GetKeyCode() == wx.WXK_SHIFT:
            g.tree.shift = False
        self.UpdateIfNeeded()
        evt.Skip()

    def OnMouse(self, evt):
        """Sync modifier state from a mouse event and refresh."""
        # Update control and shift states
        g.tree.ctrl = evt.ControlDown()
        g.tree.shift = evt.ShiftDown()
        self.UpdateIfNeeded()
        evt.Skip()

    # Update UI after key presses, if necessary
    def UpdateIfNeeded(self):
        """Refresh the UI and status bar only when modifier state changed."""
        tree = g.tree
        if self.ctrl != tree.ctrl or self.shift != tree.shift:
            # Enabling is needed only for ctrl
            if self.ctrl != tree.ctrl: self.UpdateUI()
            self.ctrl = tree.ctrl
            self.shift = tree.shift
            # Status bar shows the current insertion mode:
            # SBL = sibling (ctrl), INS = insert (shift).
            if tree.ctrl:
                status = 'SBL'
            elif tree.shift:
                status = 'INS'
            else:
                status = ''
            g.frame.SetStatusText(status, 1)

    # Update interface
    def UpdateUI(self):
        """Enable/disable tool groups according to the current tree selection."""
        if not self.IsShown(): return
        # Update status bar
        tree = g.tree
        item = tree.selection
        # If nothing selected, disable everything and return
        if not item:
            # Disable everything
            for grp in range(GROUPNUM):
                self.EnableGroup(grp, False)
            self.state = None
            return
        if tree.ctrl: needInsert = True
        else: needInsert = tree.NeedInsert(item)
        # Enable depending on selection
        if item == tree.root or needInsert and tree.GetItemParent(item) == tree.root:
            state = STATE_ROOT
        else:
            xxx = tree.GetPyData(item).treeObject()
            # Check parent for possible child nodes if inserting sibling
            if needInsert: xxx = xxx.parent
            if xxx.__class__ == xxxMenuBar:
                state = STATE_MENUBAR
            elif xxx.__class__ in [xxxToolBar, xxxTool] or \
                 xxx.__class__ == xxxSeparator and xxx.parent.__class__ == xxxToolBar:
                state = STATE_TOOLBAR
            elif xxx.__class__ in [xxxMenu, xxxMenuItem]:
                state = STATE_MENU
            elif xxx.__class__ == xxxStdDialogButtonSizer:
                state = STATE_STDDLGBTN
            else:
                state = STATE_ELSE
        # Enable depending on selection
        if state != self.state:
            # Disable everything
            for grp in range(GROUPNUM):
                self.EnableGroup(grp, False)
            # Enable some
            if state == STATE_ROOT:
                self.EnableGroup(GROUP_WINDOWS, True)
                self.EnableGroup(GROUP_MENUS, True)
                # But disable items
                self.EnableGroupItems(GROUP_MENUS,
                                      [ ID_NEW.TOOL,
                                        ID_NEW.MENU_ITEM,
                                        ID_NEW.SEPARATOR ],
                                      False)
            elif state == STATE_STDDLGBTN:
                pass # nothing can be added from toolbar
            elif state == STATE_MENUBAR:
                self.EnableGroup(GROUP_MENUS)
                self.EnableGroupItems(GROUP_MENUS,
                                      [ ID_NEW.TOOL_BAR,
                                        ID_NEW.MENU_BAR,
                                        ID_NEW.TOOL ],
                                      False)
            elif state == STATE_TOOLBAR:
                self.EnableGroup(GROUP_MENUS)
                self.EnableGroupItems(GROUP_MENUS,
                                      [ ID_NEW.TOOL_BAR,
                                        ID_NEW.MENU,
                                        ID_NEW.MENU_BAR,
                                        ID_NEW.MENU_ITEM ],
                                      False)
                self.EnableGroup(GROUP_CONTROLS)
                self.EnableGroupItems(GROUP_CONTROLS,
                                      [ ID_NEW.TREE_CTRL,
                                        ID_NEW.NOTEBOOK,
                                        ID_NEW.SPLITTER_WINDOW ],
                                      False)
            elif state == STATE_MENU:
                self.EnableGroup(GROUP_MENUS)
                self.EnableGroupItems(GROUP_MENUS,
                                      [ ID_NEW.TOOL_BAR,
                                        ID_NEW.MENU_BAR,
                                        ID_NEW.TOOL ],
                                      False)
            else:
                self.EnableGroup(GROUP_WINDOWS)
                self.EnableGroupItems(GROUP_WINDOWS,
                                      [ ID_NEW.FRAME,
                                        ID_NEW.DIALOG ],
                                      False)
                self.EnableGroup(GROUP_MENUS)
                # NOTE(review): ID_NEW.MENU_BAR appears twice in this list;
                # one entry was possibly meant to be ID_NEW.TOOL_BAR -- confirm.
                self.EnableGroupItems(GROUP_MENUS,
                                      [ ID_NEW.MENU_BAR,
                                        ID_NEW.MENU_BAR,
                                        ID_NEW.MENU,
                                        ID_NEW.MENU_ITEM,
                                        ID_NEW.TOOL,
                                        ID_NEW.SEPARATOR ],
                                      False)
                self.EnableGroup(GROUP_SIZERS)
                self.EnableGroup(GROUP_CONTROLS)
        # Special case for *book (always executed)
        if state == STATE_ELSE:
            if xxx.__class__ in [xxxNotebook, xxxChoicebook, xxxListbook]:
                self.EnableGroup(GROUP_SIZERS, False)
            else:
                self.EnableGroup(GROUP_SIZERS)
                if not (xxx.isSizer or xxx.parent and xxx.parent.isSizer):
                    self.EnableGroupItem(GROUP_SIZERS, ID_NEW.SPACER, False)
            if xxx.__class__ == xxxFrame:
                self.EnableGroupItem(GROUP_MENUS, ID_NEW.MENU_BAR)
        # Save state
        self.state = state
| andreas-p/admin4 | xrced/tools.py | Python | apache-2.0 | 12,508 |
# Lint as: python3
"""Unit tests for test_record module."""
import sys
import unittest
from openhtf.core import test_record
def _get_obj_size(obj):
size = 0
for attr in obj.__slots__: # pytype: disable=attribute-error
size += sys.getsizeof(attr)
size += sys.getsizeof(getattr(obj, attr))
return size
class TestRecordTest(unittest.TestCase):
  """Tests for test_record.Attachment behavior."""

  def test_attachment_data(self):
    """The data property round-trips the bytes passed at construction."""
    expected_data = b'test attachment data'
    attachment = test_record.Attachment(expected_data, 'text')
    data = attachment.data
    self.assertEqual(data, expected_data)

  def test_attachment_memory_safety(self):
    """Attachment's slotted footprint must not grow with payload size.

    Compares the slot-size sum of an empty attachment against one holding
    a large payload; equality shows the large data is not retained in the
    object's slots themselves.
    """
    empty_attachment = test_record.Attachment(b'', 'text')
    expected_obj_size = _get_obj_size(empty_attachment)
    large_data = b'test attachment data' * 1000
    attachment = test_record.Attachment(large_data, 'text')
    obj_size = _get_obj_size(attachment)
    self.assertEqual(obj_size, expected_obj_size)
| google/openhtf | test/core/test_record_test.py | Python | apache-2.0 | 931 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import db
from rogerthat.bizz.job import run_job
from rogerthat.models import ServiceIdentity
def job():
    """Kick off a datastore job that re-saves every ServiceIdentity.

    run_job iterates the keys produced by query() and applies worker()
    to each; both receive no extra arguments (the empty lists).
    """
    run_job(query, [], worker, [])
def worker(si_key):
    """Load one ServiceIdentity by key and immediately re-save it.

    A get-then-put round trip re-serializes the entity; presumably used
    to apply model/schema changes on write -- TODO confirm intent.
    """
    si = db.get(si_key)
    si.put()
def query():
    """Return a keys-only query over all ServiceIdentity entities."""
    return ServiceIdentity.all(keys_only=True)
| our-city-app/oca-backend | src/rogerthat/bizz/job/save_service_identities.py | Python | apache-2.0 | 928 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# See testable_service/integration_test.py and spinnaker_testing/spinnaker.py
# for more details.
#
# This test will use ssh to peek at the spinnaker configuration
# to determine the managed project it should verify, and to determine
# the spinnaker account name to use when sending it commands.
# Sample Usage:
# Assuming you have created $PASSPHRASE_FILE (which you should chmod 400)
# and $CITEST_ROOT points to the root directory of this repository
# (which is . if you execute this from the root). The passphrase file
# can be ommited if you run ssh-agent and add .ssh/compute_google_engine.
#
# Since this test runs a pipeline from a Jenkins trigger, you need to
# configure Jenkins in the following way.
# 1. Take note of your Jenkins server baseUrl,
# i.e <protocol>://<host>[:port]/[basePath]
# and store it as $JENKINS_URL.
#
# 2. Create a file, fill it with
# <username> <password>
# corresponding to valid Jenkins credentials, and store its path
# as $JENKINS_AUTH_PATH (also chmod 400).
# Or, set JENKINS_USER and JENKINS_PASSWORD environment variables.
#
# 3. Take note of the Jenkins master you have configured in Igor,
# and store its name as $JENKINS_MASTER.
#
# 4. Choose a name for your jenkins job and store it in $JENKINS_JOB.
#
# 5. On your Jenkins server, navigate to /job/$JENKINS_JOB/configure
# a) Under "Build Triggers", check "Trigger builds remotely".
# b) In the "Authentication Token" field, write some token
# and store it as $JENKINS_TOKEN.
# c) Add a build step that produces a file.
# mkdir -p somedir
# touch somedir/vim_2:7.4.052-1ubuntu3_amd64.deb
# Note that this might need to be consistent with the
# platform the bakery is on. The above works on Ubuntu 14.04
# d) Add post build action to archive the artifacts
# files to archive: somedir/vim_2:7.4.052-1ubuntu3_amd64.deb
#
#
# PYTHONPATH=$CITEST_ROOT/testing/citest \
# python $CITEST_ROOT/testing/citest/tests/bake_and_deploy_test.py \
# --gce_ssh_passphrase_file=$PASSPHRASE_FILE \
# --gce_project=$PROJECT \
# --gce_zone=$ZONE \
# --gce_instance=$INSTANCE \
# --jenkins_master=$JENKINS_MASTER \
# --jenkins_url=$JENKINS_URL \
# --jenkins_auth_path=$JENKINS_AUTH_PATH \
# --jenkins_job=$JENKINS_JOB \
# --jenkins_token=$JENKINS_TOKEN \
# --test_google \
# --test_aws
# or
# PYTHONPATH=$CITEST_ROOT/testing/citest \
# python $CITEST_ROOT/testing/citest/tests/bake_and_deploy_test.py \
# --native_hostname=host-running-smoke-test
# --managed_gce_project=$PROJECT \
# --test_gce_zone=$ZONE
# --jenkins_url=$JENKINS_URL \
# --jenkins_auth_path=$JENKINS_AUTH_PATH \
# --jenkins_job=$JENKINS_JOB \
# --jenkins_token=$JENKINS_TOKEN
# --test_google \
# --test_aws
# pylint: disable=bad-continuation
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# Standard python modules.
import logging
import os
import sys
import time
# citest modules.
import citest.base
import citest.gcp_testing as gcp
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
ov_factory = jc.ObservationPredicateFactory()
class BakeAndDeployTestScenario(sk.SpinnakerTestScenario):
MINIMUM_PROJECT_QUOTA = {
'INSTANCE_TEMPLATES': 1,
'HEALTH_CHECKS': 1,
'FORWARDING_RULES': 1,
'IN_USE_ADDRESSES': 1,
'TARGET_POOLS': 1,
'IMAGES': 1,
}
MINIMUM_REGION_QUOTA = {
'CPUS': 1,
'IN_USE_ADDRESSES': 1,
'INSTANCE_GROUP_MANAGERS': 1,
'INSTANCES': 1,
}
  @classmethod
  def new_agent(cls, bindings):
    """Create the Gate agent this scenario talks to."""
    return gate.new_agent(bindings)
  @classmethod
  def initArgumentParser(cls, parser, defaults=None):
    """Initialize command line argument parser.

    Registers the Jenkins trigger options plus the platform toggles on top
    of the base scenario's arguments.

    Args:
      parser: argparse.ArgumentParser
      defaults: optional dict of default values (currently unused beyond
          being normalized to a dict).
    """
    super(BakeAndDeployTestScenario, cls).initArgumentParser(
        parser, defaults=defaults)
    defaults = defaults or {}
    parser.add_argument(
      '--jenkins_master', default='',
      help='The name of the jenkins master as configured in igor.'
           ' You may need to override this to an alias depending on firewalls.'
           ' The Spinnaker server may have permissions, but the citest machine'
           ' may not. Otherwise, this defaults to Spinnaker\'s binding.')
    parser.add_argument(
      '--jenkins_job', default='NoOpTrigger',
      help='The name of the jenkins job to trigger off.'
           ' You will need to add this to your --jenkins_master.')
    parser.add_argument(
      '--jenkins_auth_path', default=None,
      help='The path to a file containing the jenkins username password pair.'
           'The contents should look like: <username> <password>.')
    parser.add_argument(
      '--jenkins_token', default='TRIGGER_TOKEN',
      help='The authentication token for the jenkins build trigger.'
           ' This corresponds to the --jenkins_job on the --jenkins_url server')
    parser.add_argument(
      '--jenkins_url', default='',
      help='The baseUrl of the jenkins service,'
           ' i.e. <protocol>://<host>[:port]/[basePath].'
           ' You may need to override this to an alias depending on firewalls.'
           ' The Spinnaker server may have permissions, but the citest machine'
           ' may not. Otherwise, this can be empty for Spinnaker\'s current'
           ' binding.')
    parser.add_argument(
      '--test_google', action='store_true',
      help='Test Google pipelines.')
    parser.add_argument(
      '--test_aws', action='store_true',
      help='Test AWS pipelines.')
  def _do_init_bindings(self):
    """Fill in missing Jenkins bindings from Spinnaker's deployed config.

    When --jenkins_master / --jenkins_url were not supplied, infer them
    from the services.jenkins.defaultMaster node of the deployed config.
    """
    logger = logging.getLogger(__name__)
    bindings = self.bindings
    deployed = self.agent.deployed_config
    yaml_node_path = 'services.jenkins.defaultMaster'
    if not bindings.get('JENKINS_MASTER'):
      bindings['JENKINS_MASTER'] = deployed[yaml_node_path + '.name']
      logger.info('Infering JENKINS_MASTER %s', bindings['JENKINS_MASTER'])
    if not bindings.get('JENKINS_URL'):
      bindings['JENKINS_URL'] = deployed[yaml_node_path + '.baseUrl']
      logger.info('Infering JENKINS_URL %s', bindings['JENKINS_URL'])
  def __init__(self, bindings, agent=None):
    """Set up scenario state: names, pipeline-id slots, Jenkins agent.

    Args:
      bindings: dict of test bindings (app, stack, platform toggles, ...).
      agent: optional pre-built Gate agent; created from bindings if None.
    """
    super(BakeAndDeployTestScenario, self).__init__(bindings, agent)
    self.logger = logging.getLogger(__name__)
    bindings = self.bindings
    # We'll call out the app name because it is widely used
    # because it scopes the context of our activities.
    self.TEST_APP = bindings['TEST_APP']
    # Load balancer name is <app>-<stack>-lb by Spinnaker naming convention.
    self.__short_lb_name = 'lb'
    self.__full_lb_name = '{app}-{stack}-{detail}'.format(
        app=self.TEST_APP, stack=bindings['TEST_STACK'],
        detail=self.__short_lb_name)
    # Pipeline ids are filled in as the pipelines get created during the test.
    self.aws_bake_pipeline_id = None
    self.aws_destroy_pipeline_id = None
    self.google_bake_pipeline_id = None
    self.google_destroy_pipeline_id = None
    self.__image_id_to_delete = None  # Id of the baked image we need to clean up after the B & D pipelines run.
    self.docker_pipeline_id = None
    self.test_google = bindings['TEST_GOOGLE']
    self.test_aws = bindings['TEST_AWS']
    # This test has been exceeding the default timeout of 13 minutes for the Jenkins agent,
    # so increase the timeout to 20 minutes.
    self.jenkins_agent = sk.JenkinsAgent(bindings['JENKINS_URL'],
        bindings['JENKINS_AUTH_PATH'],
        self.agent, None, 1200)
    self.run_tests = True
    if not (self.test_google or self.test_aws):
      self.run_tests = False
      self.logger.warning(
          'Neither --test_google nor --test_aws were set. '
          'No tests will be run.')
def create_app(self):
"""Create the test application and verify front50 reports it.

Returns:
  st.OperationContract that creates the app and checks /applications.
"""
builder = st.HttpContractBuilder(self.agent)
# The clause registers itself on the builder by side effect.
(builder.new_clause_builder('Has Application', retryable_for_secs=60)
.get_url_path('applications')
.contains_path_value('name', self.TEST_APP))
return st.OperationContract(
self.agent.make_create_app_operation(
bindings=self.bindings, application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_GOOGLE_ACCOUNT']),
builder.build())
def delete_app(self):
"""Delete the test application.

No observation clauses are asserted; completing the operation is enough.
"""
contract = jc.Contract()  # intentionally empty contract
return st.OperationContract(
self.agent.make_delete_app_operation(
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_GOOGLE_ACCOUNT']),
contract=contract)
def create_load_balancer(self):
"""Create the GCE network load balancer used by the deploy stages.

Submits an upsertLoadBalancer task and verifies (via GCP observation)
that both the HTTP health check and the forwarding rule appear.

Returns:
  st.OperationContract performing and verifying the creation.
"""
bindings = self.bindings
load_balancer_name = self.__full_lb_name
# Health-check tuning parameters, reused in the request payload below.
spec = {
'checkIntervalSec': 5,
'healthyThreshold': 2,
'unhealthyThreshold': 2,
'timeoutSec': 5,
'port': 80
}
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'gce',
'provider': 'gce',
'stack': bindings['TEST_STACK'],
'detail': self.__short_lb_name,
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'region': bindings['TEST_GCE_REGION'],
'ipProtocol': 'TCP',
'portRange': spec['port'],
'loadBalancerName': load_balancer_name,
'healthCheck': {
'port': spec['port'],
'timeoutSec': spec['timeoutSec'],
'checkIntervalSec': spec['checkIntervalSec'],
'healthyThreshold': spec['healthyThreshold'],
'unhealthyThreshold': spec['unhealthyThreshold'],
},
'type': 'upsertLoadBalancer',
'availabilityZones': {bindings['TEST_GCE_REGION']: []},
'user': '[anonymous]'
}],
description='Create Load Balancer: ' + load_balancer_name,
application=self.TEST_APP)
# We arent testing load balancers, so assume it is working,
# but we'll look for at the health check to know it is ready.
builder = gcp.GcpContractBuilder(self.gcp_observer)
# Clauses register themselves on the builder by side effect.
(builder.new_clause_builder('Health Check Added',
retryable_for_secs=30)
.list_resource('httpHealthChecks')
.contains_path_value('name', load_balancer_name + '-hc'))
(builder.new_clause_builder('Load Balancer Created',
retryable_for_secs=60)
.list_resource('forwardingRules')
.contains_path_value('name', self.__full_lb_name))
return st.OperationContract(
self.new_post_operation(
title='create_load_balancer', data=payload,
path=('applications/{app}/tasks').format(app=self.TEST_APP)),
contract=builder.build())
def delete_load_balancer(self):
"""Delete the test load balancer and verify its health check is gone.

Returns:
  st.OperationContract performing and verifying the deletion.
"""
bindings = self.bindings
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'type': 'deleteLoadBalancer',
'cloudProvider': 'gce',
'loadBalancerName': self.__full_lb_name,
'region': bindings['TEST_GCE_REGION'],
'regions': [bindings['TEST_GCE_REGION']],
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': '[anonymous]'
}],
description='Delete Load Balancer: {0} in {1}:{2}'.format(
self.__full_lb_name,
bindings['SPINNAKER_GOOGLE_ACCOUNT'],
bindings['TEST_GCE_REGION']),
application=self.TEST_APP)
builder = gcp.GcpContractBuilder(self.gcp_observer)
# Only the health-check removal is observed here.
(builder.new_clause_builder('Health Check Removed', retryable_for_secs=30)
.list_resource('httpHealthChecks')
.excludes_path_value('name', self.__full_lb_name + '-hc'))
return st.OperationContract(
self.new_post_operation(
title='delete_load_balancer', data=payload,
path=('applications/{app}/tasks').format(app=self.TEST_APP)),
contract=builder.build())
def make_jenkins_trigger(self):
  """Build the pipeline trigger clause for the bound Jenkins master/job."""
  trigger = dict(enabled=True, type='jenkins')
  trigger['master'] = self.bindings['JENKINS_MASTER']
  trigger['job'] = self.bindings['JENKINS_JOB']
  return trigger
def make_bake_stage(self, package, providerType, requisiteStages=None,
                    **kwargs):
  """Build a 'bake' pipeline stage definition.

  Args:
    package: [String] debian package name to bake into the image.
    providerType: [String] cloud provider ('gce', 'aws', 'docker', ...).
    requisiteStages: [list of String] refIds this stage depends on.
    kwargs: extra provider-specific stage fields merged into the result.
  """
  stage = dict(
      requisiteStageRefIds=requisiteStages or [],
      refId='BAKE',
      type='bake',
      name='Bake',
      user='[anonymous]',
      baseOs='trusty',
      baseLabel='release',
      cloudProviderType=providerType,
      package=package,
      rebake=True)
  stage.update(kwargs)
  return stage
def make_deploy_google_stage(self, requisiteStages=None):
  """Build the 'deploy' stage that rolls the baked image out to GCE."""
  bindings = self.bindings
  # Single-cluster deploy: one f1-micro instance behind the test LB.
  cluster = {
      'application': self.TEST_APP,
      'strategy': '',
      'stack': bindings['TEST_STACK'],
      'freeFormDetails': '',
      'loadBalancers': [self.__full_lb_name],
      'securityGroups': [],
      'capacity': {'min': 1, 'max': 1, 'desired': 1},
      'zone': bindings['TEST_GCE_ZONE'],
      'network': 'default',
      'instanceMetadata': {
          'startup-script':
              'sudo apt-get update && sudo apt-get install apache2 -y',
          'load-balancer-names': self.__full_lb_name
      },
      'tags': [],
      'availabilityZones': {
          bindings['TEST_GCE_REGION']: [bindings['TEST_GCE_ZONE']]
      },
      'cloudProvider': 'gce',
      'provider': 'gce',
      'instanceType': 'f1-micro',
      'targetSize': 1,
      'account': bindings['SPINNAKER_GOOGLE_ACCOUNT']
  }
  return {
      'requisiteStageRefIds': requisiteStages or [],
      'refId': 'DEPLOY',
      'type': 'deploy',
      'name': 'Deploy',
      'clusters': [cluster]
  }
def make_destroy_group_stage(self, cloudProvider, requisiteStages,
                             **kwargs):
  """Build a 'destroyServerGroup' stage for the current server group."""
  cluster_name = '{app}-{stack}'.format(
      app=self.TEST_APP, stack=self.bindings['TEST_STACK'])
  stage = dict(
      cloudProvider=cloudProvider,
      cloudProviderType=cloudProvider,
      credentials=self.bindings['SPINNAKER_GOOGLE_ACCOUNT'],
      name='Destroy Server Group',
      refId='DESTROY',
      requisiteStageRefIds=requisiteStages or [],
      target='current_asg_dynamic',
      regions=[self.bindings['TEST_GCE_REGION']],
      cluster=cluster_name,
      type='destroyServerGroup')
  stage.update(kwargs)
  return stage
def make_disable_group_stage(self, cloudProvider, requisiteStages=None,
                             **kwargs):
  """Build a 'disableServerGroup' stage for the current server group."""
  cluster_name = '{app}-{stack}'.format(
      app=self.TEST_APP, stack=self.bindings['TEST_STACK'])
  stage = dict(
      requisiteStageRefIds=requisiteStages or [],
      refId='DISABLE',
      type='disableServerGroup',
      name='Disable Server Group',
      cloudProviderType=cloudProvider,
      cloudProvider=cloudProvider,
      target='current_asg_dynamic',
      cluster=cluster_name,
      credentials=self.bindings['SPINNAKER_GOOGLE_ACCOUNT'])
  stage.update(kwargs)
  return stage
def create_bake_docker_pipeline(self):
"""Create a one-stage pipeline baking a docker image; verify it saved.

Returns:
  st.OperationContract posting the pipeline and checking the stored
  pipeline config for the app.
"""
name = 'BakeDocker'
self.docker_pipeline_id = name
bake_stage = self.make_bake_stage(
package='vim', providerType='docker', region='global')
pipeline_spec = dict(
name=name,
stages=[bake_stage],
triggers=[self.make_jenkins_trigger()],
application=self.TEST_APP,
stageCounter=1,
parallel=True,
limitConcurrent=True,
appConfig={}
)
payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
builder = st.HttpContractBuilder(self.agent)
(builder.new_clause_builder('Has Pipeline')
.get_url_path(
'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
.contains_path_value(None, pipeline_spec))
return st.OperationContract(
self.new_post_operation(
title='create_bake_docker_pipeline', data=payload, path='pipelines',
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def create_bake_and_deploy_google_pipeline(self):
"""Create the GCE bake->deploy pipeline; verify the config was saved.

Returns:
  st.OperationContract posting the pipeline and checking the stored
  pipeline config for the app.
"""
name = 'BakeAndDeployGoogle'
self.google_bake_pipeline_id = name
bake_stage = self.make_bake_stage(
package='vim', providerType='gce', region='global')
# Deploy depends on the bake stage completing first.
deploy_stage = self.make_deploy_google_stage(requisiteStages=['BAKE'])
pipeline_spec = dict(
name=name,
stages=[bake_stage, deploy_stage],
triggers=[self.make_jenkins_trigger()],
application=self.TEST_APP,
stageCounter=2,
parallel=True,
limitConcurrent=True,
appConfig={}
)
payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
builder = st.HttpContractBuilder(self.agent)
(builder.new_clause_builder('Has Pipeline')
.get_url_path(
'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
.contains_path_value(None, pipeline_spec))
return st.OperationContract(
self.new_post_operation(
title='create_bake_google_pipeline', data=payload, path='pipelines',
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def create_disable_and_destroy_google_pipeline(self):
"""Create the GCE disable->destroy pipeline; verify the config saved.

Returns:
  st.OperationContract posting the pipeline and checking the stored
  pipeline config for the app.
"""
name = 'DisableAndDestroyGoogle'
self.google_destroy_pipeline_id = name
disable_stage = self.make_disable_group_stage(
cloudProvider='gce', regions=[self.bindings['TEST_GCE_REGION']])
# Destroy depends on disable completing first.
destroy_stage = self.make_destroy_group_stage(
cloudProvider='gce', requisiteStages=['DISABLE'])
pipeline_spec = dict(
name=name,
stages=[disable_stage, destroy_stage],
application=self.TEST_APP,
stageCounter=2,
parallel=True,
limitConcurrent=True,
appConfig={}
)
payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
builder = st.HttpContractBuilder(self.agent)
(builder.new_clause_builder('Has Pipeline', retryable_for_secs=5)
.get_url_path(
'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
.contains_path_value(None, pipeline_spec))
return st.OperationContract(
self.new_post_operation(
title='create_destroy_google_pipeline',
data=payload, path='pipelines',
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def create_bake_and_deploy_aws_pipeline(self):
"""Create the AWS bake->deploy pipeline; verify the config was saved.

Returns:
  st.OperationContract posting the pipeline and checking the stored
  pipeline config for the app.
"""
name = 'BakeAndDeployAws'
self.aws_bake_pipeline_id = name
bake_stage = self.make_bake_stage(
package='vim',
providerType='aws',
regions=[self.bindings['TEST_AWS_REGION']],
vmType='hvm', storeType='ebs')
# FIXME(jacobkiefer): this is creating a gce deploy stage in an aws
# pipeline. Not good.
deploy_stage = self.make_deploy_google_stage(requisiteStages=['BAKE'])
pipeline_spec = dict(
name=name,
stages=[bake_stage, deploy_stage],
triggers=[self.make_jenkins_trigger()],
application=self.TEST_APP,
stageCounter=2,
parallel=True
)
payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
builder = st.HttpContractBuilder(self.agent)
(builder.new_clause_builder('Has Pipeline')
.get_url_path(
'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
.contains_path_value(None, pipeline_spec))
return st.OperationContract(
self.new_post_operation(
title='create_bake_aws_pipeline', data=payload, path='pipelines',
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def create_disable_and_destroy_aws_pipeline(self):
"""Create the AWS disable->destroy pipeline; verify the config saved.

Returns:
  st.OperationContract posting the pipeline and checking the stored
  pipeline config for the app.
"""
name = 'DisableAndDestroyAws'
self.aws_destroy_pipeline_id = name
disable_stage = self.make_disable_group_stage(
cloudProvider='aws', regions=[self.bindings['TEST_AWS_REGION']])
destroy_stage = self.make_destroy_group_stage(
cloudProvider='aws', zones=[self.bindings['TEST_AWS_ZONE']],
requisiteStages=['DISABLE'])
pipeline_spec = dict(
name=name,
stages=[disable_stage, destroy_stage],
triggers=[self.make_jenkins_trigger()],
application=self.TEST_APP,
stageCounter=2,
parallel=True
)
payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
builder = st.HttpContractBuilder(self.agent)
(builder.new_clause_builder('Has Pipeline')
.get_url_path(
'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
.contains_path_value(None, pipeline_spec))
return st.OperationContract(
self.new_post_operation(
title='create_destroy_aws_pipeline', data=payload, path='pipelines',
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def delete_pipeline(self, pipeline_id):
"""Delete the named pipeline config and verify it is no longer listed.

Args:
  pipeline_id: [String] name/id of the pipeline config to delete.
"""
payload = self.agent.make_json_payload_from_kwargs(id=pipeline_id)
# NOTE(review): os.path.join is used to build a URL path; on Windows this
# would produce backslashes — '/'.join would be safer. Confirm intent.
path = os.path.join('pipelines', self.TEST_APP, pipeline_id)
builder = st.HttpContractBuilder(self.agent)
# NOTE(review): clause is titled 'Has Pipeline' but asserts absence;
# presumably a copy of the creation clause's title.
(builder.new_clause_builder('Has Pipeline',
retryable_for_secs=5)
.get_url_path(
'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
.excludes_path_value('name', pipeline_id))
return st.OperationContract(
self.new_delete_operation(
title='delete_bake_pipeline', data=payload, path=path,
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def trigger_bake_and_deploy_google_pipeline(self):
"""Trigger the GCE bake & deploy pipeline via Jenkins and verify deploy.

Verifies the v000 managed instance group reaches targetSize 1, and
captures the baked image id for later cleanup.
"""
path = 'applications/{app}/pipelines'.format(app=self.TEST_APP)
group_name = '{app}-{stack}-v000'.format(
app=self.TEST_APP, stack=self.bindings['TEST_STACK'])
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Managed Instance Group Deployed',
retryable_for_secs=30)
.inspect_resource('instanceGroupManagers', group_name)
.EXPECT(ov_factory.value_list_path_contains('targetSize', jp.NUM_EQ(1))))
return st.OperationContract(
self.jenkins_agent.new_jenkins_trigger_operation(
title='monitor_bake_pipeline',
job=self.bindings['JENKINS_JOB'],
token=self.bindings['JENKINS_TOKEN'],
status_class=gate.GatePipelineStatus,
status_path=path,
max_wait_secs=1080),  # Allow 18 mins to bake and deploy.
contract=builder.build(),
cleanup=self.capture_baked_image)
def run_disable_and_destroy_google_pipeline(self, pipeline_id):
  """Run the disable->destroy pipeline and verify the group is gone.

  Args:
    pipeline_id: [String] id of the disable/destroy pipeline to run.
        (BUG FIX: previously ignored in favor of the hard-coded
        self.google_destroy_pipeline_id attribute.)

  Returns:
    st.OperationContract running the pipeline; verifies the v000
    managed instance group is deleted (404) or scaled to zero, and
    deletes the captured baked image afterwards.
  """
  path = 'pipelines/{app}/{id}'.format(app=self.TEST_APP,
                                       id=pipeline_id)
  group_name = '{app}-{stack}-v000'.format(
      app=self.TEST_APP, stack=self.bindings['TEST_STACK'])
  builder = gcp.GcpContractBuilder(self.gcp_observer)
  (builder.new_clause_builder('Managed Instance Group Destroyed')
   .inspect_resource('instanceGroupManagers', group_name)
   .EXPECT(ov_factory.error_list_contains(
       gcp.HttpErrorPredicate(http_code=404)))
   .OR(ov_factory.value_list_path_contains('targetSize', jp.NUM_EQ(0))))
  return st.OperationContract(
      self.new_post_operation(
          title='run_destroy_pipeline',
          data='',
          path=path,
          max_wait_secs=1080),  # Allow 18 mins to disable and destroy.
      # BUG FIX: the contract built above was discarded in favor of an
      # empty jc.Contract(), so the destruction was never verified.
      contract=builder.build(),
      cleanup=self.delete_baked_image)
def new_jenkins_build_operation(self):
"""Placeholder hook; Jenkins triggering goes through self.jenkins_agent."""
return None
def capture_baked_image(self, execution_context):
  """Record the image name baked by the triggered Bake & Deploy pipeline
  so the cleanup step can delete it later."""
  op_status = execution_context.get('OperationStatus', None)
  if op_status is None:
    self.logger.info(
        'Operation could not be performed so there is no image to delete.')
    return
  trigger_status = op_status.trigger_status
  doc = trigger_status.detail_doc
  if isinstance(doc, list):
    if not doc:
      self.logger.error('No trigger_status, so baked image is unknown\n'
                        '%s\n\n', trigger_status)
      return
    self.logger.info('Using first status.')
    doc = doc[0]
  stage_list = doc.get('stages', [])
  if stage_list:
    baked_image = stage_list[0].get('context', {}).get('imageId')
  else:
    baked_image = None
  self.logger.info('Capturing the baked image="%s" to delete', baked_image)
  self.__image_id_to_delete = baked_image
def delete_baked_image(self, _unused_execution_context):
"""Deletes the baked image when we are done using it."""
# No-op when no image was captured (e.g. the bake never ran).
if self.__image_id_to_delete:
execution_context = citest.base.ExecutionContext()
self.gcp_observer.invoke_resource(
execution_context, 'delete', 'images', resource_id=self.__image_id_to_delete)
class BakeAndDeployTest(st.AgentTestCase):
  """Drives BakeAndDeployTestScenario's operations in execution order.

  Methods are named test_a.., test_b.., ... so the runner executes
  setup, pipeline creation, triggering, and teardown in sequence.
  """

  @staticmethod
  def setUpClass():
    """Verify GCP quota up front so the suite fails fast when short."""
    runner = citest.base.TestRunner.global_runner()
    scenario = runner.get_shared_data(BakeAndDeployTestScenario)
    if not scenario.test_google:
      return
    managed_region = scenario.bindings['TEST_GCE_REGION']
    title = 'Check Quota for {0}'.format(scenario.__class__.__name__)
    verify_results = gcp.verify_quota(
        title,
        scenario.gcp_observer,
        project_quota=BakeAndDeployTestScenario.MINIMUM_PROJECT_QUOTA,
        regions=[(managed_region,
                  BakeAndDeployTestScenario.MINIMUM_REGION_QUOTA)])
    if not verify_results:
      raise RuntimeError('Insufficient Quota: {0}'.format(verify_results))

  @property
  def scenario(self):
    """The shared scenario instance holding bindings and pipeline ids."""
    return citest.base.TestRunner.global_runner().get_shared_data(
        BakeAndDeployTestScenario)

  @property
  def testing_agent(self):
    return self.scenario.agent

  def test_a_create_app(self):
    if not self.scenario.run_tests:
      self.skipTest("No --test_{google, aws} flags were set")
    else:
      self.run_test_case(self.scenario.create_app())

  def test_b_create_load_balancer(self):
    if not self.scenario.run_tests:
      self.skipTest("No --test_{google, aws} flags were set")
    else:
      self.run_test_case(self.scenario.create_load_balancer())

  def test_c1_create_bake_and_deploy_google_pipeline(self):
    if not self.scenario.test_google:
      self.skipTest("--test_google flag not set")
    else:
      self.run_test_case(self.scenario.create_bake_and_deploy_google_pipeline())

  def test_d1_create_disable_and_destroy_google_pipeline(self):
    if not self.scenario.test_google:
      self.skipTest("--test_google flag not set")
    else:
      self.run_test_case(
          self.scenario.create_disable_and_destroy_google_pipeline())

  def test_c2_create_bake_and_deploy_aws_pipeline(self):
    if not self.scenario.test_aws:
      self.skipTest("--test_aws flag not set")
    else:
      self.run_test_case(self.scenario.create_bake_and_deploy_aws_pipeline())

  def test_d2_create_disable_and_destroy_aws_pipeline(self):
    if not self.scenario.test_aws:
      self.skipTest("--test_aws flag not set")
    else:
      self.run_test_case(
          self.scenario.create_disable_and_destroy_aws_pipeline())

  def test_e1_trigger_bake_and_deploy_google_pipeline(self):
    if not self.scenario.test_google:
      self.skipTest("--test_google flag not set")
    else:
      # Wait for Echo's cache to pick up the pipeline.
      time.sleep(20)
      self.run_test_case(
          self.scenario.trigger_bake_and_deploy_google_pipeline(),
          poll_every_secs=5)

  def test_w1_run_disable_and_destroy_google_pipeline(self):
    if not self.scenario.test_google:
      self.skipTest("--test_google flag not set")
    else:
      self.run_test_case(
          self.scenario.run_disable_and_destroy_google_pipeline(
              self.scenario.google_destroy_pipeline_id),
          poll_every_secs=5)

  def test_x1_delete_google_bake_pipeline(self):
    if not self.scenario.test_google:
      self.skipTest("--test_google flag not set")
    else:
      self.run_test_case(
          self.scenario.delete_pipeline(self.scenario.google_bake_pipeline_id))

  def test_x1_delete_google_destroy_pipeline(self):
    if not self.scenario.test_google:
      self.skipTest("--test_google flag not set")
    else:
      self.run_test_case(
          self.scenario.delete_pipeline(self.scenario.google_destroy_pipeline_id))

  def test_x2_delete_aws_bake_pipeline(self):
    # BUG FIX: this previously referenced scenario.aws_pipeline_id, an
    # attribute that does not exist (the scenario defines
    # aws_bake_pipeline_id and aws_destroy_pipeline_id), so the AWS
    # cleanup raised AttributeError and left the pipelines behind.
    if not self.scenario.test_aws:
      self.skipTest("--test_aws flag not set")
    else:
      self.run_test_case(
          self.scenario.delete_pipeline(self.scenario.aws_bake_pipeline_id))

  def test_x2_delete_aws_destroy_pipeline(self):
    if not self.scenario.test_aws:
      self.skipTest("--test_aws flag not set")
    else:
      self.run_test_case(
          self.scenario.delete_pipeline(self.scenario.aws_destroy_pipeline_id))

  def test_y_delete_load_balancer(self):
    if not self.scenario.run_tests:
      self.skipTest("No --test_{google, aws} flags were set")
    else:
      self.run_test_case(self.scenario.delete_load_balancer(),
                         max_retries=5)

  def test_z_delete_app(self):
    if not self.scenario.run_tests:
      self.skipTest("No --test_{google, aws} flags were set")
    # Give a total of a minute because it might also need
    # an internal cache update
    else:
      self.run_test_case(self.scenario.delete_app(),
                         retry_interval_secs=8, max_retries=8)
def main():
"""Run the bake-and-deploy test suite with default stack/app bindings."""
defaults = {
'TEST_STACK': 'baketest' + BakeAndDeployTestScenario.DEFAULT_TEST_ID,
'TEST_APP': 'baketest' + BakeAndDeployTestScenario.DEFAULT_TEST_ID
}
return citest.base.TestRunner.main(
parser_inits=[BakeAndDeployTestScenario.initArgumentParser],
default_binding_overrides=defaults,
test_case_list=[BakeAndDeployTest])
if __name__ == '__main__':
sys.exit(main())
| ewiseblatt/spinnaker | testing/citest/tests/bake_and_deploy_test.py | Python | apache-2.0 | 30,181 |
# -*- coding:utf-8 -*-
#html_doc = '''<div><a href="http://www.weblio.jp/content/%E5%BD%A2%E5%AE%B9%E5%8B%95%E8%A9%9E" title="形容動詞の意味" class=crosslink>形容動詞</a>「<a href="http://www.weblio.jp/content/%E3%82%A2%E3%83%BC%E3%83%86%E3%82%A3%E3%83%95%E3%82%A3%E3%82%B7%E3%83%A3%E3%83%AB" title="アーティフィシャルの意味" class=crosslink>アーティフィシャル</a>だ」が、<a href="http://www.weblio.jp/content/%E6%8E%A5%E5%B0%BE%E8%AA%9E" title="接尾語の意味" class=crosslink>接尾語</a>「さ」により<a href="http://www.weblio.jp/content/%E4%BD%93%E8%A8%80" title="体言の意味" class=crosslink>体言</a>化した形。<br><br class=nhgktD><div><!--AVOID_CROSSLINK--><p class=nhgktL>終止形</p><p class=nhgktR>アーティフィシャルだ <a href="http://www.weblio.jp/content/%E3%82%A2%E3%83%BC%E3%83%86%E3%82%A3%E3%83%95%E3%82%A3%E3%82%B7%E3%83%A3%E3%83%AB" title="アーティフィシャル">» 「アーティフィシャル」の意味を調べる</a></p><!--/AVOID_CROSSLINK--><br class=clr></div>'''
#from bs4 import BeautifulSoup
#soup = BeautifulSoup(html_doc, 'html.parser')
#a = [text for text in soup.stripped_strings]
#print ''.join(a[:-1])
import socket
import urllib2
import traceback
import re
#import MySQLdb
import time
from bs4 import BeautifulSoup
#from complainDetail import *
timeout = 10  # default socket timeout (seconds) applied to all urllib2 requests
socket.setdefaulttimeout(timeout)
def fetchDetail(link, word):
tryNum = 3
tn = 0
while tn < tryNum:
details = []
try:
f = urllib2.urlopen(link)
content = f.read()
soup = BeautifulSoup(content, 'html.parser')
main = soup.find(attrs={'class':'Nhgkt'})
left = soup.find_all(attrs={'class':'nhgktL'})
right = soup.find_all(attrs={'class':'nhgktR'})
if(left):
for text in main.stripped_strings:
if(re.match(u'終止形$', text)!=None):break
details.append(text)
print '#'.join(details).encode('utf8'),
print '%',left[0].string.encode('utf8'), ':',
aList = right[0].find_all('a')
for a in aList:
print a['title'].encode('utf8'),
print
else:
for text in main.stripped_strings:
if(u'»' in text):break
details.append(text)
print '#'.join(details).encode('utf8')
break
except Exception,e:
print e
tn = tn + 1
#print url, " access error!"
#print "try ", tn, "time"
time.sleep(5)
if tn==tryNum:
#print "Cannot fetch page!"
return -1
return 0
if __name__ == "__main__":
wordsUrlList = open('verb_ok.txt')
for line in wordsUrlList.readlines():
l = line.split(' ')
link = l[0]
word = l[1].strip('\n')
print word, '%', link, '%',
if(fetchDetail(link, word)==-1):
print link, word, "ERROR."
print "Finished"
#indexUrl = "http://www.weblio.jp/category/dictionary/nhgkt/aa"
#f = urllib2.urlopen(indexUrl)
#content = f.read()
#soup = BeautifulSoup(content, 'html.parser')
#urlTable = soup.find(attrs={'class':'kanaAlpha'})
#aList = urlTable.find_all('a')
#for a in aList:
# print '"'+a['href']+'",'
| pprivulet/DataScience | Dic/getDetail.py | Python | apache-2.0 | 3,537 |
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" In-Memory Object Server for Swift """
import os
from swift import gettext_ as _
from eventlet import Timeout
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.http import is_success
from swift.obj.mem_diskfile import InMemoryFileSystem
from swift.obj import server
class ObjectController(server.ObjectController):
    """
    Implements the WSGI application for the Swift In-Memory Object Server.
    """

    def setup(self, conf):
        """
        Create the in-memory file system backing all diskfiles.

        (Doc fix: the previous docstring claimed nothing was done here,
        but the in-memory filesystem is created per controller.)

        :param conf: WSGI configuration parameter
        """
        self._filesystem = InMemoryFileSystem()

    def get_diskfile(self, device, partition, account, container, obj,
                     **kwargs):
        """
        Utility method for instantiating a DiskFile object supporting a given
        REST API.

        An implementation of the object server that wants to use a different
        DiskFile class would simply over-ride this method to provide that
        behavior.
        """
        # device/partition are irrelevant for the in-memory backend; objects
        # are keyed purely by account/container/object.
        return self._filesystem.get_diskfile(account, container, obj, **kwargs)

    def async_update(self, op, account, container, obj, host, partition,
                     contdevice, headers_out, objdevice, policy_idx):
        """
        Sends or saves an async update.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param host: host that the container is on
        :param partition: partition that the container is on
        :param contdevice: device name that the container is on
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        :param policy_idx: the associated storage policy index
        """
        headers_out['user-agent'] = 'obj-server %s' % os.getpid()
        full_path = '/%s/%s/%s' % (account, container, obj)
        if all([host, partition, contdevice]):
            # BUG FIX: parse the host before the try block so the error
            # handler below can always reference ip/port (previously a
            # NameError if rsplit raised before they were bound).
            ip, port = host.rsplit(':', 1)
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect(ip, port, contdevice, partition, op,
                                        full_path, headers_out)
                with Timeout(self.node_timeout):
                    response = conn.getresponse()
                    response.read()
                    if is_success(response.status):
                        return
                    else:
                        self.logger.error(_(
                            'ERROR Container update failed: %(status)d '
                            'response from %(ip)s:%(port)s/%(dev)s'),
                            {'status': response.status, 'ip': ip, 'port': port,
                             'dev': contdevice})
            except (Exception, Timeout):
                self.logger.exception(_(
                    'ERROR container update failed with '
                    '%(ip)s:%(port)s/%(dev)s'),
                    {'ip': ip, 'port': port, 'dev': contdevice})
        # FIXME: For now don't handle async updates

    def REPLICATE(self, request):
        """
        Handle REPLICATE requests for the Swift Object Server.  This is used
        by the object replicator to get hashes for directories.

        No-op for the in-memory backend: there is nothing to replicate.
        """
        pass
def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI object server apps"""
    # Local settings override the global paste.deploy configuration.
    settings = dict(global_conf)
    settings.update(local_conf)
    return ObjectController(settings)
| Khushbu27/Tutorial | swift/obj/mem_server.py | Python | apache-2.0 | 4,314 |