code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
try: datafile = file(datafilepath, "wb") except Exception: return False if self._timestampFormat is None: self._timestampFormat = _STR_EPOCHS datafile.write("# time_as_<%s> value..." % self._timestampFormat) convert = TimeSeries.convert_epoch_to_timestamp for datapoint in self._timeseriesData: timestamp = datapoint[0] values = datapoint[1:] if self._timestampFormat is not None: timestamp = convert(timestamp, self._timestampFormat) datafile.write("%s %s" % (timestamp, " ".join([str(entry) for entry in values]))) datafile.close() return True
def to_gnuplot_datafile(self, datafilepath)
Dumps the TimeSeries into a gnuplot compatible data file. :param string datafilepath: Path used to create the file. If that file already exists, it will be overwritten! :return: Returns :py:const:`True` if the data could be written, :py:const:`False` otherwise. :rtype: boolean
4.352761
4.222089
1.03095
if self.exclude_regex and is_file: if self.exclude_regex.match(filename): return False return True else: return True
def include(self, filename, is_file)
Determines if a file should be included in a project for uploading. If file_exclude_regex is empty it will include everything. :param filename: str: filename to match it should not include directory :param is_file: bool: is this a file if not this will always return true :return: boolean: True if we should include the file.
3.280476
3.317302
0.988899
full_pattern = '{}{}{}'.format(dir_name, os.sep, pattern) filename_regex = fnmatch.translate(full_pattern) self.regex_list.append(re.compile(filename_regex))
def add_filename_pattern(self, dir_name, pattern)
Adds a Unix shell-style wildcard pattern underneath the specified directory :param dir_name: str: directory that contains the pattern :param pattern: str: Unix shell-style wildcard pattern
3.217839
3.62674
0.887254
for regex_item in self.regex_list: if regex_item.match(path): return False return True
def include(self, path)
Returns False if any pattern matches the path :param path: str: filename path to test :return: boolean: True if we should include this path
4.793919
5.339808
0.89777
for dir_name, child_dirs, child_files in os.walk(top_path, followlinks=followlinks): for child_filename in child_files: if child_filename == DDS_IGNORE_FILENAME: pattern_lines = self._read_non_empty_lines(dir_name, child_filename) self.add_patterns(dir_name, pattern_lines)
def load_directory(self, top_path, followlinks)
Traverse top_path directory and save patterns in any .ddsignore files found. :param top_path: str: directory name we should traverse looking for ignore files :param followlinks: boolean: should we traverse symbolic links
4.107792
3.44017
1.194067
for pattern_line in pattern_lines: self.pattern_list.add_filename_pattern(dir_name, pattern_line)
def add_patterns(self, dir_name, pattern_lines)
Add patterns the should apply below dir_name :param dir_name: str: directory that contained the patterns :param pattern_lines: [str]: array of patterns
3.578059
4.430804
0.807542
return self.pattern_list.include(path) and self.file_filter.include(os.path.basename(path), is_file)
def include(self, path, is_file)
Returns False if any pattern matches the path :param path: str: filename path to test :return: boolean: True if we should include this path
6.012313
5.859283
1.026118
self._cache_project_list_once() if name in [project.name for project in self.projects]: raise DuplicateNameError("There is already a project named {}".format(name)) self.client.create_project(name, description) self.clear_project_cache() return name
def create_project(self, name, description)
Create a project with the specified name and description :param name: str: unique name for this project :param description: str: long description of this project :return: str: name of the project
3.884189
4.188716
0.927298
project = self._get_project_for_name(project_name) project.delete() self.clear_project_cache()
def delete_project(self, project_name)
Delete a project with the specified name. Raises ItemNotFound if no such project exists :param project_name: str: name of the project to delete :return:
4.063573
4.970108
0.817603
project = self._get_project_for_name(project_name) file_path_dict = self._get_file_path_dict_for_project(project) return list(file_path_dict)
def list_files(self, project_name)
Return a list of file paths that make up project_name :param project_name: str: specifies the name of the project to list contents of :return: [str]: returns a list of remote paths for all files part of the specified project qq
3.14548
3.432492
0.916384
project = self._get_project_for_name(project_name) file = project.get_child_for_path(remote_path) file.download_to_path(local_path)
def download_file(self, project_name, remote_path, local_path=None)
Download a file from a project When local_path is None the file will be downloaded to the base filename :param project_name: str: name of the project to download a file from :param remote_path: str: remote path specifying which file to download :param local_path: str: optional argument to customize where the file will be downloaded to
3.370422
3.754096
0.897799
project = self._get_or_create_project(project_name) file_upload = FileUpload(project, remote_path, local_path) file_upload.run()
def upload_file(self, project_name, local_path, remote_path=None)
Upload a file into project creating a new version if it already exists. Will also create project and parent folders if they do not exist. :param project_name: str: name of the project to upload a file to :param local_path: str: path to download the file into :param remote_path: str: remote path specifying file to upload to (defaults to local_path basename)
3.302377
3.485533
0.947452
project = self._get_or_create_project(project_name) remote_file = project.get_child_for_path(remote_path) remote_file.delete()
def delete_file(self, project_name, remote_path)
Delete a file or folder from a project :param project_name: str: name of the project containing a file we will delete :param remote_path: str: remote path specifying file to delete
3.344108
3.860271
0.866288
try: seek = fObj.seek tell = fObj.tell except AttributeError: return UNKNOWN_LENGTH originalPosition = tell() seek(0, self._SEEK_END) end = tell() seek(originalPosition, self._SEEK_SET) return end - originalPosition
def _determineLength(self, fObj)
Determine how many bytes can be read out of C{fObj} (assuming it is not modified from this point on). If the determination cannot be made, return C{UNKNOWN_LENGTH}.
3.531334
2.932253
1.204308
self._task = self._cooperate(self._writeloop(consumer)) d = self._task.whenDone() def maybeStopped(reason): # IBodyProducer.startProducing's Deferred isn't support to fire if # stopProducing is called. reason.trap(task.TaskStopped) return defer.Deferred() d.addCallbacks(lambda ignored: None, maybeStopped) return d
def startProducing(self, consumer)
Start a cooperative task which will read bytes from the input file and write them to C{consumer}. Return a L{Deferred} which fires after all bytes have been written. @param consumer: Any L{IConsumer} provider
8.602439
8.398457
1.024288
while True: bytes = self._inputFile.read(self._readSize) if not bytes: self._inputFile.close() break consumer.write(bytes) yield None
def _writeloop(self, consumer)
Return an iterator which reads one chunk of bytes from the input file and writes them to the consumer for each time it is iterated.
4.981359
3.28033
1.518554
different_items = LocalOnlyCounter(self.config.upload_bytes_per_chunk) different_items.walk_project(self.local_project) return different_items
def _count_differences(self)
Count how many things we will be sending. :param local_project: LocalProject project we will send data from :return: LocalOnlyCounter contains counts for various items
19.604746
8.759197
2.23819
progress_printer = ProgressPrinter(self.different_items.total_items(), msg_verb='sending') upload_settings = UploadSettings(self.config, self.remote_store.data_service, progress_printer, self.project_name_or_id, self.file_upload_post_processor) project_uploader = ProjectUploader(upload_settings) project_uploader.run(self.local_project) progress_printer.finished()
def run(self)
Upload different items within local_project to remote store showing a progress bar.
9.250125
6.526146
1.417395
project_uploader = ProjectUploadDryRun() project_uploader.run(self.local_project) items = project_uploader.upload_items if not items: return "\n\nNo changes found. Nothing needs to be uploaded.\n\n" else: result = "\n\nFiles/Folders that need to be uploaded:\n" for item in items: result += "{}\n".format(item) result += "\n" return result
def dry_run_report(self)
Returns text displaying the items that need to be uploaded or a message saying there are no files/folders to upload. :return: str: report text
4.278532
3.594812
1.190196
project = self.remote_store.fetch_remote_project(self.project_name_or_id, must_exist=True, include_children=False) report = UploadReport(project.name) report.walk_project(self.local_project) return report.get_content()
def get_upload_report(self)
Generate and print a report onto stdout.
7.559338
6.891497
1.096908
msg = 'URL to view project' project_id = self.local_project.remote_id url = '{}: https://{}/#/project/{}'.format(msg, self.config.get_portal_url_base(), project_id) return url
def get_url_msg(self)
Print url to view the project via dds portal.
9.145974
6.525898
1.401489
if item.need_to_send: self.files += 1 self.chunks += item.count_chunks(self.bytes_per_chunk)
def visit_file(self, item, parent)
Increments counter if item needs to be sent. :param item: LocalFile :param parent: LocalFolder/LocalProject
8.222002
6.691469
1.228729
return '{}, {}, {}'.format(LocalOnlyCounter.plural_fmt('project', self.projects), LocalOnlyCounter.plural_fmt('folder', self.folders), LocalOnlyCounter.plural_fmt('file', self.files))
def result_str(self)
Return a string representing the totals contained herein. :return: str counts/types string
6.005357
6.061357
0.990761
if cnt == 1: return '{} {}'.format(cnt, name) else: return '{} {}s'.format(cnt, name)
def plural_fmt(name, cnt)
pluralize name if necessary and combine with cnt :param name: str name of the item type :param cnt: int number items of this type :return: str name and cnt joined
2.138303
2.664664
0.802466
if item.sent_to_remote: self._add_report_item(item.path, item.remote_id)
def visit_folder(self, item, parent)
Add folder to the report if it was sent. :param item: LocalFolder folder to possibly add :param parent: LocalFolder/LocalContent not used here
12.524128
8.445263
1.482977
if item.sent_to_remote: self._add_report_item(item.path, item.remote_id, item.size, item.get_hash_value())
def visit_file(self, item, parent)
Add file to the report if it was sent. :param item: LocalFile file to possibly add. :param parent: LocalFolder/LocalContent not used here
10.024667
7.852129
1.276681
name_str = self.name.ljust(max_name) remote_id_str = self.remote_id.ljust(max_remote_id) size_str = self.size.ljust(max_size) return u'{} {} {} {}'.format(name_str, remote_id_str, size_str, self.file_hash)
def str_with_sizes(self, max_name, max_remote_id, max_size)
Create string for report based on internal properties using sizes to line up columns. :param max_name: int width of the name column :param max_remote_id: int width of the remote_id column :return: str info from this report item
1.956118
2.141748
0.913328
auth = DataServiceAuth(config) auth.set_auth_data(data_service_auth_data) data_service = DataServiceApi(auth, config.url) sender = ChunkSender(data_service, upload_id, filename, config.upload_bytes_per_chunk, index, num_chunks_to_send, progress_queue) try: sender.send() except: error_msg = "".join(traceback.format_exception(*sys.exc_info())) progress_queue.error(error_msg)
def upload_async(data_service_auth_data, config, upload_id, filename, index, num_chunks_to_send, progress_queue)
Method run in another process called from ParallelChunkProcessor.make_and_start_process. :param data_service_auth_data: tuple of auth data for rebuilding DataServiceAuth :param config: dds.Config configuration settings to use during upload :param upload_id: uuid unique id of the 'upload' we are uploading chunks into :param filename: str path to file who's contents we will be uploading :param index: int offset into filename where we will start sending bytes from (must multiply by upload_bytes_per_chunk) :param num_chunks_to_send: int number of chunks of config.upload_bytes_per_chunk size to send. :param progress_queue: ProgressQueue queue to send notifications of progress or errors
2.823334
2.707783
1.042673
path_data = self.local_file.get_path_data() hash_data = path_data.get_hash() self.upload_id = self.upload_operations.create_upload(project_id, path_data, hash_data, storage_provider_id=self.config.storage_provider_id) ParallelChunkProcessor(self).run() parent_data = ParentData(parent_kind, parent_id) remote_file_data = self.upload_operations.finish_upload(self.upload_id, hash_data, parent_data, self.local_file.remote_id) if self.file_upload_post_processor: self.file_upload_post_processor.run(self.data_service, remote_file_data) return remote_file_data['id']
def upload(self, project_id, parent_kind, parent_id)
Upload file contents to project within a specified parent. :param project_id: str project uuid :param parent_kind: str type of parent ('dds-project' or 'dds-folder') :param parent_id: str uuid of parent :return: str uuid of the newly uploaded file
3.912377
4.084587
0.957839
if not remote_filename: remote_filename = path_data.name() mime_type = path_data.mime_type() size = path_data.size() def func(): return self.data_service.create_upload(project_id, remote_filename, mime_type, size, hash_data.value, hash_data.alg, storage_provider_id=storage_provider_id, chunked=chunked) resp = retry_until_resource_is_consistent(func, self.waiting_monitor) return resp.json()
def _create_upload(self, project_id, path_data, hash_data, remote_filename=None, storage_provider_id=None, chunked=True)
Create upload for uploading multiple chunks or the non-chunked variety (includes upload url). :param project_id: str: uuid of the project :param path_data: PathData: holds file system data about the file we are uploading :param hash_data: HashData: contains hash alg and value for the file we are uploading :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise) :param storage_provider_id: str: optional storage provider id :param chunked: bool: should we create a chunked upload :return: str: uuid for the upload
3.660621
3.670179
0.997396
upload_response = self._create_upload(project_id, path_data, hash_data, remote_filename=remote_filename, storage_provider_id=storage_provider_id, chunked=True) return upload_response['id']
def create_upload(self, project_id, path_data, hash_data, remote_filename=None, storage_provider_id=None)
Create a chunked upload id to pass to create_file_chunk_url to create upload urls. :param project_id: str: uuid of the project :param path_data: PathData: holds file system data about the file we are uploading :param hash_data: HashData: contains hash alg and value for the file we are uploading :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise) :param storage_provider_id: str: optional storage provider id :return: str: uuid for the upload
2.474769
2.888228
0.856847
upload_response = self._create_upload(project_id, path_data, hash_data, remote_filename=remote_filename, storage_provider_id=storage_provider_id, chunked=False) return upload_response['id'], upload_response['signed_url']
def create_upload_and_chunk_url(self, project_id, path_data, hash_data, remote_filename=None, storage_provider_id=None)
Create an non-chunked upload that returns upload id and upload url. This type of upload doesn't allow additional upload urls. For single chunk files this method is more efficient than create_upload/create_file_chunk_url. :param project_id: str: uuid of the project :param path_data: PathData: holds file system data about the file we are uploading :param hash_data: HashData: contains hash alg and value for the file we are uploading :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise) :param storage_provider_id:str: optional storage provider id :return: str, dict: uuid for the upload, upload chunk url dict
2.574796
2.607153
0.987589
chunk_len = len(chunk) hash_data = HashData.create_from_chunk(chunk) one_based_index = chunk_num + 1 def func(): return self.data_service.create_upload_url(upload_id, one_based_index, chunk_len, hash_data.value, hash_data.alg) resp = retry_until_resource_is_consistent(func, self.waiting_monitor) return resp.json()
def create_file_chunk_url(self, upload_id, chunk_num, chunk)
Create a url for uploading a particular chunk to the datastore. :param upload_id: str: uuid of the upload this chunk is for :param chunk_num: int: where in the file does this chunk go (0-based index) :param chunk: bytes: data we are going to upload :return:
5.776412
6.428957
0.898499
http_verb = url_json['http_verb'] host = url_json['host'] url = url_json['url'] http_headers = url_json['http_headers'] resp = self._send_file_external_with_retry(http_verb, host, url, http_headers, chunk) if resp.status_code != 200 and resp.status_code != 201: raise ValueError("Failed to send file to external store. Error:" + str(resp.status_code) + host + url)
def send_file_external(self, url_json, chunk)
Send chunk to external store specified in url_json. Raises ValueError on upload failure. :param data_service: data service to use for sending chunk :param url_json: dict contains where/how to upload chunk :param chunk: data to be uploaded
2.795044
2.612483
1.06988
count = 0 retry_times = 1 if http_verb == 'PUT': retry_times = SEND_EXTERNAL_PUT_RETRY_TIMES while True: try: return self.data_service.send_external(http_verb, host, url, http_headers, chunk) except requests.exceptions.ConnectionError: count += 1 if count < retry_times: if count == 1: # Only show a warning the first time we fail to send a chunk self._show_retry_warning(host) time.sleep(SEND_EXTERNAL_RETRY_SECONDS) self.data_service.recreate_requests_session() else: raise
def _send_file_external_with_retry(self, http_verb, host, url, http_headers, chunk)
Send chunk to host, url using http_verb. If http_verb is PUT and a connection error occurs retry a few times. Pauses between retries. Raises if unsuccessful.
3.36323
3.106116
1.082777
sys.stderr.write("\nConnection to {} failed. Retrying.\n".format(host)) sys.stderr.flush()
def _show_retry_warning(host)
Displays a message on stderr that we lost connection to a host and will retry. :param host: str: name of the host we are trying to communicate with
4.182372
4.484168
0.932697
self.data_service.complete_upload(upload_id, hash_data.value, hash_data.alg) if remote_file_id: result = self.data_service.update_file(remote_file_id, upload_id) return result.json() else: result = self.data_service.create_file(parent_data.kind, parent_data.id, upload_id) return result.json()
def finish_upload(self, upload_id, hash_data, parent_data, remote_file_id)
Complete the upload and create or update the file. :param upload_id: str: uuid of the upload we are completing :param hash_data: HashData: hash info about the file :param parent_data: ParentData: info about the parent of this file :param remote_file_id: str: uuid of this file if it already exists or None if it is a new file :return: dict: DukeDS details about this file
2.95351
2.659266
1.110649
processes = [] progress_queue = ProgressQueue(Queue()) num_chunks = ParallelChunkProcessor.determine_num_chunks(self.config.upload_bytes_per_chunk, self.local_file.size) work_parcels = ParallelChunkProcessor.make_work_parcels(self.config.upload_workers, num_chunks) for (index, num_items) in work_parcels: processes.append(self.make_and_start_process(index, num_items, progress_queue)) wait_for_processes(processes, num_chunks, progress_queue, self.watcher, self.local_file)
def run(self)
Sends contents of a local file to a remote data service.
4.942703
4.642146
1.064745
if file_size == 0: return 1 return int(math.ceil(float(file_size) / float(chunk_size)))
def determine_num_chunks(chunk_size, file_size)
Figure out how many pieces we are sending the file in. NOTE: duke-data-service requires an empty chunk to be uploaded for empty files.
2.375537
1.987994
1.194942
chunks_per_worker = int(math.ceil(float(num_chunks) / float(upload_workers))) return ParallelChunkProcessor.divide_work(range(num_chunks), chunks_per_worker)
def make_work_parcels(upload_workers, num_chunks)
Make groups so we can split up num_chunks into similar sizes. Rounds up trying to keep work evenly split so sometimes it will not use all workers. For very small numbers it can result in (upload_workers-1) total workers. For example if there are two few items to distribute. :param upload_workers: int target number of workers :param num_chunks: int number of total items we need to send :return [(index, num_items)] - an array of tuples where array element will be in a separate process.
4.578235
4.493274
1.018908
grouped_indexes = [list_of_indexes[i:i + batch_size] for i in range(0, len(list_of_indexes), batch_size)] return [(batch[0], len(batch)) for batch in grouped_indexes]
def divide_work(list_of_indexes, batch_size)
Given a sequential list of indexes split them into num_parts. :param list_of_indexes: [int] list of indexes to be divided up :param batch_size: number of items to put in batch(not exact obviously) :return: [(int,int)] list of (index, num_items) to be processed
2.297294
2.561505
0.896853
process = Process(target=upload_async, args=(self.data_service.auth.get_auth_data(), self.config, self.upload_id, self.local_file.path, index, num_items, progress_queue)) process.start() return process
def make_and_start_process(self, index, num_items, progress_queue)
Create and start a process to upload num_items chunks from our file starting at index. :param index: int offset into file(must be multiplied by upload_bytes_per_chunk to get actual location) :param num_items: int number chunks to send :param progress_queue: ProgressQueue queue to send notifications of progress or errors
5.46016
5.204966
1.049029
sent_chunks = 0 chunk_num = self.index with open(self.filename, 'rb') as infile: infile.seek(self.index * self.chunk_size) while sent_chunks != self.num_chunks_to_send: chunk = infile.read(self.chunk_size) self._send_chunk(chunk, chunk_num) self.progress_queue.processed(1) chunk_num += 1 sent_chunks += 1
def send(self)
For each chunk we need to send, create upload url and send bytes. Raises exception on error.
3.149642
2.851331
1.104622
url_info = self.upload_operations.create_file_chunk_url(self.upload_id, chunk_num, chunk) self.upload_operations.send_file_external(url_info, chunk)
def _send_chunk(self, chunk, chunk_num)
Send a single chunk to the remote service. :param chunk: bytes data we are uploading :param chunk_num: int number associated with this chunk
6.514436
6.869654
0.948292
# return True, if not interval is defined for the parameter if parameter not in self._parameterIntervals: return True interval = self._parameterIntervals[parameter] if interval[2] and interval[3]: return interval[0] <= value <= interval[1] if not interval[2] and interval[3]: return interval[0] < value <= interval[1] if interval[2] and not interval[3]: return interval[0] <= value < interval[1] #if False == interval[2] and False == interval[3]: return interval[0] < value < interval[1]
def _in_valid_interval(self, parameter, value)
Returns if the parameter is within its valid interval. :param string parameter: Name of the parameter that has to be checked. :param numeric value: Value of the parameter. :return: Returns :py:const:`True` it the value for the given parameter is valid, :py:const:`False` otherwise. :rtype: boolean
2.783153
2.794722
0.99586
# return if not interval is defined for the parameter if parameter not in self._parameterIntervals: return interval = self._parameterIntervals[parameter] return "%s has to be in %s%s, %s%s. Current value is %s." % ( parameter, BaseMethod._interval_definitions[interval[2]][0], interval[0], interval[1], BaseMethod._interval_definitions[interval[3]][1], value )
def _get_value_error_message_for_invalid_prarameter(self, parameter, value)
Returns the ValueError message for the given parameter. :param string parameter: Name of the parameter the message has to be created for. :param numeric value: Value outside the parameters interval. :return: Returns a string containing hte message. :rtype: string
5.248976
5.088074
1.031623
if not self._in_valid_interval(name, value): raise ValueError(self._get_value_error_message_for_invalid_prarameter(name, value)) #if name in self._parameters: # print "Parameter %s already existed. It's old value will be replaced with %s" % (name, value) self._parameters[name] = value
def set_parameter(self, name, value)
Sets a parameter for the BaseMethod. :param string name: Name of the parameter that has to be checked. :param numeric value: Value of the parameter.
4.925903
5.135762
0.959138
missingParams = filter(lambda rp: rp not in self._parameters, self._requiredParameters) return len(missingParams) == 0
def can_be_executed(self)
Returns if the method can already be executed. :return: Returns :py:const:`True` if all required parameters where already set, False otherwise. :rtype: boolean
8.620222
7.4701
1.153963
# set the furecast until variable to None if necessary if name == "valuesToForecast": self._forecastUntil = None # continue with the parents implementation return super(BaseForecastingMethod, self).set_parameter(name, value)
def set_parameter(self, name, value)
Sets a parameter for the BaseForecastingMethod. :param string name: Name of the parameter. :param numeric value: Value of the parameter.
17.764856
15.1
1.176481
if tsformat is not None: timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat) self._forecastUntil = timestamp
def forecast_until(self, timestamp, tsformat=None)
Sets the forecasting goal (timestamp wise). This function enables the automatic determination of valuesToForecast. :param timestamp: timestamp containing the end date of the forecast. :param string tsformat: Format of the timestamp. This is used to convert the timestamp from UNIX epochs, if necessary. For valid examples take a look into the :py:func:`time.strptime` documentation.
5.592313
6.25032
0.894724
# do not set anything, if it is not required if self._forecastUntil is None: return # check the TimeSeries for correctness if not timeSeries.is_sorted(): raise ValueError("timeSeries has to be sorted.") if not timeSeries.is_normalized(): raise ValueError("timeSeries has to be normalized.") timediff = timeSeries[-1][0] - timeSeries[-2][0] forecastSpan = self._forecastUntil - timeSeries[-1][0] self.set_parameter("valuesToForecast", int(forecastSpan / timediff) + 1)
def _calculate_values_to_forecast(self, timeSeries)
Calculates the number of values, that need to be forecasted to match the goal set in forecast_until. This sets the parameter "valuesToForecast" and should be called at the beginning of the :py:meth:`BaseMethod.execute` implementation. :param TimeSeries timeSeries: Should be a sorted and normalized TimeSeries instance. :raise: Raises a :py:exc:`ValueError` if the TimeSeries is either not normalized or sorted.
4.352935
3.37085
1.291346
windowsize = self._parameters["windowsize"] if len (timeSeries) < windowsize: raise ValueError("windowsize is larger than the number of elements in timeSeries.") tsLength = len(timeSeries) nbrOfLoopRuns = tsLength - windowsize + 1 res = TimeSeries() for idx in xrange(nbrOfLoopRuns): end = idx + windowsize data = timeSeries[idx:end] timestamp = data[windowsize//2][0] value = sum([i[1] for i in data])/windowsize res.add_entry(timestamp, value) res.sort_timeseries() return res
def execute(self, timeSeries)
Creates a new TimeSeries containing the SMA values for the predefined windowsize. :param TimeSeries timeSeries: The TimeSeries used to calculate the simple moving average values. :return: TimeSeries object containing the smooth moving average. :rtype: TimeSeries :raise: Raises a :py:exc:`ValueError` wif the defined windowsize is larger than the number of elements in timeSeries :note: This implementation aims to support independent for loop execution.
4.314638
3.970428
1.086693
if not min_occurs in (0, 1): raise RuntimeError("Unexpected min bound for node schema") self.children[child.tag] = child self.children_min_occurs[child.tag] = min_occurs return child
def add(self, child, min_occurs=1)
Add a child node. @param child: The schema for the child node. @param min_occurs: The minimum number of times the child node must occur, if C{None} is given the default is 1.
4.549137
5.138173
0.885361
return etree.SubElement(self._root, self._get_namespace_tag(tag))
def _create_child(self, tag)
Create a new child element with the given tag.
7.092159
6.149552
1.153281
tag = self._get_namespace_tag(tag) children = self._root.findall(tag) if len(children) > 1: raise WSDLParseError("Duplicate tag '%s'" % tag) if len(children) == 0: return None return children[0]
def _find_child(self, tag)
Find the child C{etree.Element} with the matching C{tag}. @raises L{WSDLParseError}: If more than one such elements are found.
2.815937
2.594474
1.08536
if value is None: if self._schema.children_min_occurs[tag] > 0: raise WSDLParseError("Missing tag '%s'" % tag) return value return value
def _check_value(self, tag, value)
Ensure that the element matching C{tag} can have the given C{value}. @param tag: The tag to consider. @param value: The value to check @return: The unchanged L{value}, if valid. @raises L{WSDLParseError}: If the value is invalid.
6.603547
6.055658
1.090476
if name.endswith("_"): if name[:-1] in self._schema.reserved: return name[:-1] return name
def _get_tag(self, name)
Get the L{NodeItem} attribute name for the given C{tag}.
8.561005
7.643872
1.119983
if self._namespace is not None: tag = "{%s}%s" % (self._namespace, tag) return tag
def _get_namespace_tag(self, tag)
Return the given C{tag} with the namespace prefix added, if any.
2.982027
2.381532
1.252146
schema = self._schema.children.get(tag) if not schema: raise WSDLParseError("Unknown tag '%s'" % tag) return schema
def _get_schema(self, tag)
Return the child schema for the given C{tag}. @raises L{WSDLParseError}: If the tag doesn't belong to the schema.
5.094074
3.520932
1.446797
if root is not None: tag = root.tag if root.nsmap: namespace = root.nsmap[None] tag = tag[len(namespace) + 2:] if tag != self.tag: raise WSDLParseError("Expected response with tag '%s', but " "got '%s' instead" % (self.tag, tag)) return SequenceItem(self, root, namespace)
def create(self, root=None, namespace=None)
Create a sequence element with the given root. @param root: The C{etree.Element} to root the sequence at, if C{None} a new one will be created.. @result: A L{SequenceItem} with the given root. @raises L{ECResponseError}: If the given C{root} has a bad tag.
4.056625
3.714654
1.09206
if isinstance(child, LeafSchema): raise RuntimeError("Sequence can't have leaf children") if self.child is not None: raise RuntimeError("Sequence has already a child") if min_occurs is None or max_occurs is None: raise RuntimeError("Sequence node without min or max") if isinstance(child, LeafSchema): raise RuntimeError("Sequence node with leaf child type") if not child.tag == "item": raise RuntimeError("Sequence node with bad child tag") self.child = child self.min_occurs = min_occurs self.max_occurs = max_occurs return child
def set(self, child, min_occurs=1, max_occurs=1)
Set the schema for the sequence children. @param child: The schema that children must match. @param min_occurs: The minimum number of children the sequence must have. @param max_occurs: The maximum number of children the sequence can have.
3.363316
3.269953
1.028552
tag = self._schema.tag children = self._root.getchildren() if len(children) >= self._schema.max_occurs: raise WSDLParseError("Too many items in tag '%s'" % tag) schema = self._schema.child tag = "item" if self._namespace is not None: tag = "{%s}%s" % (self._namespace, tag) child = etree.SubElement(self._root, tag) return schema.create(child)
def append(self)
Append a new item to the sequence, appending it to the end. @return: The newly created item. @raises L{WSDLParseError}: If the operation would result in having more child elements than the allowed max.
3.689804
3.459867
1.066458
for index, child in enumerate(self._root.getchildren()): if child is item._root: del self[index] return item raise WSDLParseError("Non existing item in tag '%s'" % self._schema.tag)
def remove(self, item)
Remove the given C{item} from the sequence. @raises L{WSDLParseError}: If the operation would result in having less child elements than the required min_occurs, or if no such index is found.
10.452957
7.471635
1.399019
try: return children[index] except IndexError: raise WSDLParseError("Non existing item in tag '%s'" % self._schema.tag)
def _get_child(self, children, index)
Return the child with the given index.
11.221121
10.045776
1.116999
parser = etree.XMLParser(remove_blank_text=True, remove_comments=True) root = etree.fromstring(wsdl, parser=parser) types = {} responses = {} schemas = {} namespace = root.attrib["targetNamespace"] for element in root[0][0]: self._remove_namespace_from_tag(element) if element.tag in ["annotation", "group"]: continue name = element.attrib["name"] if element.tag == "element": if name.endswith("Response"): if name in responses: raise RuntimeError("Schema already defined") responses[name] = element elif element.tag == "complexType": types[name] = [element, False] else: raise RuntimeError("Top-level element with unexpected tag") for name, element in responses.iteritems(): schemas[name] = self._parse_type(element, types) schemas[name].namespace = namespace return schemas
def parse(self, wsdl)
Parse the given C{wsdl} data and build the associated schemas. @param wdsl: A string containing the raw xml of the WDSL definition to parse. @return: A C{dict} mapping response type names to their schemas.
3.148256
3.175694
0.99136
name = element.attrib["name"] type = element.attrib["type"] if not type.startswith("tns:"): raise RuntimeError("Unexpected element type %s" % type) type = type[4:] [children] = types[type][0] types[type][1] = True self._remove_namespace_from_tag(children) if children.tag not in ("sequence", "choice"): raise RuntimeError("Unexpected children type %s" % children.tag) if children[0].attrib["name"] == "item": schema = SequenceSchema(name) else: schema = NodeSchema(name) for child in children: self._remove_namespace_from_tag(child) if child.tag == "element": name, type, min_occurs, max_occurs = self._parse_child(child) if type in self.leaf_types: if max_occurs != 1: raise RuntimeError("Unexpected max value for leaf") if not isinstance(schema, NodeSchema): raise RuntimeError("Attempt to add leaf to a non-node") schema.add(LeafSchema(name), min_occurs=min_occurs) else: if name == "item": # sequence if not isinstance(schema, SequenceSchema): raise RuntimeError("Attempt to set child for " "non-sequence") schema.set(self._parse_type(child, types), min_occurs=min_occurs, max_occurs=max_occurs) else: if max_occurs != 1: raise RuntimeError("Unexpected max for node") if not isinstance(schema, NodeSchema): raise RuntimeError("Unexpected schema type") schema.add(self._parse_type(child, types), min_occurs=min_occurs) elif child.tag == "choice": pass else: raise RuntimeError("Unexpected child type") return schema
def _parse_type(self, element, types)
Parse a 'complexType' element. @param element: The top-level complexType element @param types: A map of the elements of all available complexType's. @return: The schema for the complexType.
2.740532
2.702353
1.014128
if set(child.attrib) - set(["name", "type", "minOccurs", "maxOccurs"]): raise RuntimeError("Unexpected attribute in child") name = child.attrib["name"] type = child.attrib["type"].split(":")[1] min_occurs = child.attrib.get("minOccurs") max_occurs = child.attrib.get("maxOccurs") if min_occurs is None: min_occurs = "1" min_occurs = int(min_occurs) if max_occurs is None: max_occurs = "1" if max_occurs != "unbounded": max_occurs = int(max_occurs) return name, type, min_occurs, max_occurs
def _parse_child(self, child)
Parse a single child element. @param child: The child C{etree.Element} to parse. @return: A tuple C{(name, type, min_occurs, max_occurs)} with the details about the given child.
1.842523
1.754074
1.050425
params = { 'project_id': item.project_id, 'from_user_id': item.from_user_id, 'to_user_id': item.to_user_id, } resp = requests.get(self.make_url(item.destination), headers=self.json_headers, params=params) self.check_response(resp) return resp
def get_existing_item(self, item)
Lookup item in remote service based on keys. :param item: D4S2Item data contains keys we will use for lookup. :return: requests.Response containing the successful result
3.008904
3.126983
0.962238
item_dict = { 'project_id': item.project_id, 'from_user_id': item.from_user_id, 'to_user_id': item.to_user_id, 'role': item.auth_role, 'user_message': item.user_message } if item.share_user_ids: item_dict['share_user_ids'] = item.share_user_ids data = json.dumps(item_dict) resp = requests.post(self.make_url(item.destination), headers=self.json_headers, data=data) self.check_response(resp) return resp
def create_item(self, item)
Create a new item in D4S2 service for item at the specified destination. :param item: D4S2Item data to use for creating a D4S2 item :return: requests.Response containing the successful result
2.578147
2.564323
1.005391
data = json.dumps({ 'force': force_send, }) url_suffix = "{}/send/".format(item_id) resp = requests.post(self.make_url(destination, url_suffix), headers=self.json_headers, data=data) self.check_response(resp) return resp
def send_item(self, destination, item_id, force_send)
Run send method for item_id at destination. :param destination: str which type of operation are we doing (SHARE_DESTINATION or DELIVER_DESTINATION) :param item_id: str D4S2 service id representing the item we want to send :param force_send: bool it's ok to email the item again :return: requests.Response containing the successful result
3.685046
3.8725
0.951594
if response.status_code == 401: raise D4S2Error(UNAUTHORIZED_MESSAGE) if not 200 <= response.status_code < 300: raise D4S2Error("Request to {} failed with {}:\n{}.".format(response.url, response.status_code, response.text))
def check_response(self, response)
Raises error if the response isn't successful. :param response: requests.Response response to be checked
3.466387
3.482368
0.995411
item_id = self.get_existing_item_id(api) if not item_id: item_id = self.create_item_returning_id(api) api.send_item(self.destination, item_id, force_send) else: if force_send: api.send_item(self.destination, item_id, force_send) else: item_type = D4S2Api.DEST_TO_NAME.get(self.destination, "Item") msg = "{} already sent. Run with --resend argument to resend." raise D4S2Error(msg.format(item_type), warning=True)
def send(self, api, force_send)
Send this item using api. :param api: D4S2Api sends messages to D4S2 :param force_send: bool should we send even if the item already exists
4.075652
3.645071
1.118127
resp = api.get_existing_item(self) items = resp.json() num_items = len(items) if num_items == 0: return None else: return items[0]['id']
def get_existing_item_id(self, api)
Lookup the id for this item via the D4S2 service. :param api: D4S2Api object who communicates with D4S2 server. :return str id of this item or None if not found
3.086093
3.06434
1.007099
resp = api.create_item(self) item = resp.json() return item['id']
def create_item_returning_id(self, api)
Create this item in the D4S2 service. :param api: D4S2Api object who communicates with D4S2 server. :return str newly created id for this item
5.63383
5.712879
0.986163
if self._is_current_user(to_user): raise ShareWithSelfError(SHARE_WITH_SELF_MESSAGE.format("share")) if not to_user.email: self._raise_user_missing_email_exception("share") self.set_user_project_permission(project, to_user, auth_role) return self._share_project(D4S2Api.SHARE_DESTINATION, project, to_user, force_send, auth_role, user_message)
def share(self, project, to_user, force_send, auth_role, user_message)
Send mail and give user specified access to the project. :param project: RemoteProject project to share :param to_user: RemoteUser user to receive email/access :param auth_role: str project role eg 'project_admin' to give to the user :param user_message: str message to be sent with the share :return: str email we share the project with
4.801442
4.990513
0.962114
self.remote_store.set_user_project_permission(project, user, auth_role)
def set_user_project_permission(self, project, user, auth_role)
Give user access permissions for a project. :param project: RemoteProject project to update permissions on :param user: RemoteUser user to receive permissions :param auth_role: str project role eg 'project_admin'
3.8857
4.362864
0.890631
if self._is_current_user(to_user): raise ShareWithSelfError(SHARE_WITH_SELF_MESSAGE.format("deliver")) if not to_user.email: self._raise_user_missing_email_exception("deliver") self.remove_user_permission(project, to_user) if new_project_name: project = self._copy_project(project, new_project_name, path_filter) return self._share_project(D4S2Api.DELIVER_DESTINATION, project, to_user, force_send, user_message=user_message, share_users=share_users)
def deliver(self, project, new_project_name, to_user, share_users, force_send, path_filter, user_message)
Remove access to project_name for to_user, copy to new_project_name if not None, send message to service to email user so they can have access. :param project: RemoteProject pre-existing project to be delivered :param new_project_name: str name of non-existing project to copy project_name to, if None we don't copy :param to_user: RemoteUser user we are handing over the project to :param share_users: [RemoteUser] who will have project shared with them once to_user accepts the project :param force_send: boolean enables resending of email for existing projects :param path_filter: PathFilter: filters what files are shared :param user_message: str message to be sent with the share :return: str email we sent deliver to
4.149634
4.122194
1.006657
from_user = self.remote_store.get_current_user() share_user_ids = None if share_users: share_user_ids = [share_user.id for share_user in share_users] item = D4S2Item(destination=destination, from_user_id=from_user.id, to_user_id=to_user.id, project_id=project.id, project_name=project.name, auth_role=auth_role, user_message=user_message, share_user_ids=share_user_ids) item.send(self.api, force_send) return to_user.email
def _share_project(self, destination, project, to_user, force_send, auth_role='', user_message='', share_users=None)
Send message to remote service to email/share project with to_user. :param destination: str which type of sharing we are doing (SHARE_DESTINATION or DELIVER_DESTINATION) :param project: RemoteProject project we are sharing :param to_user: RemoteUser user we are sharing with :param auth_role: str project role eg 'project_admin' email is customized based on this setting. :param user_message: str message to be sent with the share :param share_users: [RemoteUser] users to have this project shared with after delivery (delivery only) :return: the email the user should receive a message on soon
2.478019
2.479306
0.999481
temp_directory = tempfile.mkdtemp() new_project_name_or_id = ProjectNameOrId.create_from_name(new_project_name) remote_project = self.remote_store.fetch_remote_project(new_project_name_or_id) if remote_project: raise ValueError("A project with name '{}' already exists.".format(new_project_name)) activity = CopyActivity(self.remote_store.data_service, project, new_project_name) self._download_project(activity, project, temp_directory, path_filter) self._upload_project(activity, new_project_name, temp_directory) activity.finished() shutil.rmtree(temp_directory) return self.remote_store.fetch_remote_project(new_project_name_or_id, must_exist=True)
def _copy_project(self, project, new_project_name, path_filter)
Copy pre-existing project with name project_name to non-existing project new_project_name. :param project: remotestore.RemoteProject project to copy from :param new_project_name: str project to copy to :param path_filter: PathFilter: filters what files are shared :return: RemoteProject new project we copied data to
2.721066
2.647506
1.027784
self.print_func("Downloading a copy of '{}'.".format(project.name)) project_download = ProjectDownload(self.remote_store, project, temp_directory, path_filter, file_download_pre_processor=DownloadedFileRelations(activity)) project_download.run()
def _download_project(self, activity, project, temp_directory, path_filter)
Download the project with project_name to temp_directory. :param activity: CopyActivity: info about the copy activity are downloading for :param project: remotestore.RemoteProject project to download :param temp_directory: str path to directory we can download into :param path_filter: PathFilter: filters what files are shared
8.558441
7.940041
1.077884
self.print_func("Uploading to '{}'.".format(project_name)) items_to_send = [os.path.join(temp_directory, item) for item in os.listdir(os.path.abspath(temp_directory))] project_name_or_id = ProjectNameOrId.create_from_name(project_name) project_upload = ProjectUpload(self.config, project_name_or_id, items_to_send, file_upload_post_processor=UploadedFileRelations(activity)) project_upload.run()
def _upload_project(self, activity, project_name, temp_directory)
Upload the contents of temp_directory into project_name :param activity: CopyActivity: info about the copy activity are uploading for :param project_name: str project name we will upload files to :param temp_directory: str path to directory who's files we will upload
4.390476
4.371229
1.004403
current_user = self.remote_store.get_current_user() return current_user.id == some_user.id
def _is_current_user(self, some_user)
Is the specified user the current user? :param some_user: RemoteUser user we want to check against the current user :return: boolean: True if the current user is the passed in user
4.756211
4.887807
0.973077
self.data_service.update_activity(self.id, self.name, self.desc, started_on=self.started, ended_on=self._current_timestamp_str())
def finished(self)
Mark the activity as finished
10.032413
7.99453
1.25491
remote_path = project_file.path file_dict = data_service.get_file(project_file.id).json() file_version_id = file_dict['current_version']['id'] data_service.create_used_relation(self.activity.id, KindType.file_str, file_version_id) self.activity.remote_path_to_file_version_id[remote_path] = file_version_id
def run(self, data_service, project_file)
Attach a remote file to activity with used relationship. :param data_service: DataServiceApi: service used to attach relationship :param project_file: ProjectFile: contains details about a file we will attach
5.745159
4.678892
1.227889
file_version_id = file_details['current_version']['id'] data_service.create_was_generated_by_relation(self.activity.id, KindType.file_str, file_version_id) used_entity_id = self._lookup_used_entity_id(file_details) data_service.create_was_derived_from_relation(used_entity_id, KindType.file_str, file_version_id, KindType.file_str)
def run(self, data_service, file_details)
Attach a remote file to activity with was generated by relationship. :param data_service: DataServiceApi: service used to attach relationship :param file_details: dict: response from DukeDS POST to /files/ containing current_version id
4.68824
3.766321
1.24478
# Since this uses the response from POST to /files/ this will include the ancestors and not be # effected by exclude_response_fields that were used when listing the project name_parts = [ancestor['name'] for ancestor in file_details['ancestors'] if ancestor['kind'] == KindType.folder_str] name_parts.append(file_details['name']) remote_path = os.sep.join(name_parts) return self.activity.remote_path_to_file_version_id[remote_path]
def _lookup_used_entity_id(self, file_details)
Return the file_version_id associated with the path from file_details. The file_version_id is looked up from a dictionary in the activity. :param file_details: dict: response from DukeDS POST to /files/ :return: str: file_version_id uuid
9.817199
7.368667
1.33229
config = Config() config.add_properties(GLOBAL_CONFIG_FILENAME) user_config_filename = get_user_config_filename() if user_config_filename == LOCAL_CONFIG_FILENAME and not allow_insecure_config_file: verify_file_private(user_config_filename) config.add_properties(user_config_filename) return config
def create_config(allow_insecure_config_file=False)
Create config based on /etc/ddsclient.conf and ~/.ddsclient.conf($DDSCLIENT_CONF) :param allow_insecure_config_file: bool: when true we will not check ~/.ddsclient permissions. :return: Config with the configuration to use for DDSClient.
3.38467
3.790329
0.892975
filename = os.path.expanduser(filename) if os.path.exists(filename): with open(filename, 'r') as yaml_file: self.update_properties(yaml.safe_load(yaml_file))
def add_properties(self, filename)
Add properties to config based on filename replacing previous values. :param filename: str path to YAML file to pull top level properties from
2.545971
2.797575
0.910063
api_url = urlparse(self.url).hostname portal_url = re.sub('^api\.', '', api_url) portal_url = re.sub(r'api', '', portal_url) return portal_url
def get_portal_url_base(self)
Determine root url of the data service from the url specified. :return: str root url of the data service (eg: https://dataservice.duke.edu)
4.327073
4.034293
1.072573
return self.values.get(Config.AUTH, os.environ.get(AUTH_ENV_KEY_NAME, None))
def auth(self)
Contains the auth token for use with connecting to the dataservice. :return:
11.280058
13.379291
0.843098
value = self.values.get(Config.UPLOAD_BYTES_PER_CHUNK, DDS_DEFAULT_UPLOAD_CHUNKS) return Config.parse_bytes_str(value)
def upload_bytes_per_chunk(self)
Return the bytes per chunk to be sent to external store. :return: int bytes per upload chunk
9.190932
8.792714
1.04529
# Profiling download on different servers showed half the number of CPUs to be optimum for speed. default_workers = int(math.ceil(default_num_workers() / 2)) return self.values.get(Config.DOWNLOAD_WORKERS, default_workers)
def download_workers(self)
Return the number of parallel works to use when downloading a file. :return: int number of workers. Specify None or 1 to disable parallel downloading
14.61263
14.670451
0.996059
if type(value) == str: if "MB" in value: return int(value.replace("MB", "")) * MB_TO_BYTES else: return int(value) else: return value
def parse_bytes_str(value)
Given a value return the integer number of bytes it represents. Trailing "MB" causes the value multiplied by 1024*1024 :param value: :return: int number of bytes represented by value.
3.31424
2.798071
1.184473
originalValue = originalValue[0] calculatedValue = calculatedValue[0] return abs(originalValue - calculatedValue)
def local_error(self, originalValue, calculatedValue)
Calculates the error between the two given values. :param list originalValue: List containing the values of the original data. :param list calculatedValue: List containing the values of the calculated TimeSeries that corresponds to originalValue. :return: Returns the error measure of the two given values. :rtype: numeric
3.534738
5.850949
0.604131
instances = {} for pos, instance_id in enumerate(instance_ids): instances["InstanceId.%d" % (pos + 1)] = instance_id query = self.query_factory( action="DescribeInstances", creds=self.creds, endpoint=self.endpoint, other_params=instances) d = query.submit() return d.addCallback(self.parser.describe_instances)
def describe_instances(self, *instance_ids)
Describe current instances.
4.282696
4.127089
1.037704
params = {"ImageId": image_id, "MinCount": str(min_count), "MaxCount": str(max_count)} if key_name is not None: params["KeyName"] = key_name if subnet_id is not None: params["SubnetId"] = subnet_id if security_group_ids is not None: for i, id in enumerate(security_group_ids): params["SecurityGroupId.%d" % (i + 1)] = id else: msg = "You must specify the security_group_ids with the subnet_id" raise ValueError(msg) elif security_groups is not None: for i, name in enumerate(security_groups): params["SecurityGroup.%d" % (i + 1)] = name else: msg = ("You must specify either the subnet_id and " "security_group_ids or security_groups") raise ValueError(msg) if user_data is not None: params["UserData"] = b64encode(user_data) if instance_type is not None: params["InstanceType"] = instance_type if availability_zone is not None: params["Placement.AvailabilityZone"] = availability_zone if kernel_id is not None: params["KernelId"] = kernel_id if ramdisk_id is not None: params["RamdiskId"] = ramdisk_id query = self.query_factory( action="RunInstances", creds=self.creds, endpoint=self.endpoint, other_params=params) d = query.submit() return d.addCallback(self.parser.run_instances)
def run_instances(self, image_id, min_count, max_count, security_groups=None, key_name=None, instance_type=None, user_data=None, availability_zone=None, kernel_id=None, ramdisk_id=None, subnet_id=None, security_group_ids=None)
Run new instances. TODO: blockDeviceMapping, monitoring, subnetId
1.864557
1.867516
0.998415
InstanceIDParam = {"InstanceId": instance_id} query = self.query_factory( action="GetConsoleOutput", creds=self.creds, endpoint=self.endpoint, other_params=InstanceIDParam) d = query.submit() return d.addCallback(self.parser.get_console_output)
def get_console_output(self, instance_id)
Get the console output for a single instance.
5.555183
5.370754
1.03434
group_names = {} if names: group_names = dict([("GroupName.%d" % (i + 1), name) for i, name in enumerate(names)]) query = self.query_factory( action="DescribeSecurityGroups", creds=self.creds, endpoint=self.endpoint, other_params=group_names) d = query.submit() return d.addCallback(self.parser.describe_security_groups)
def describe_security_groups(self, *names)
Describe security groups. @param names: Optionally, a list of security group names to describe. Defaults to all security groups in the account. @return: A C{Deferred} that will fire with a list of L{SecurityGroup}s retrieved from the cloud.
4.050772
4.208185
0.962594
parameters = {"GroupName": name, "GroupDescription": description} if vpc_id: parameters["VpcId"] = vpc_id query = self.query_factory( action="CreateSecurityGroup", creds=self.creds, endpoint=self.endpoint, other_params=parameters) d = query.submit() return d.addCallback(self.parser.create_security_group)
def create_security_group(self, name, description, vpc_id=None)
Create security group. @param name: Name of the new security group. @param description: Description of the new security group. @param vpc_id: ID of the VPC to which the security group will belong. @return: A C{Deferred} that will fire with a truth value for the success of the operation.
4.138991
4.536664
0.912342
if source_group_name and source_group_owner_id: parameters = { "SourceSecurityGroupName": source_group_name, "SourceSecurityGroupOwnerId": source_group_owner_id, } elif ip_protocol and from_port and to_port and cidr_ip: parameters = { "IpProtocol": ip_protocol, "FromPort": from_port, "ToPort": to_port, "CidrIp": cidr_ip, } else: msg = ("You must specify either both group parameters or " "all the ip parameters.") raise ValueError(msg) if group_id: parameters["GroupId"] = group_id elif group_name: parameters["GroupName"] = group_name else: raise ValueError("You must specify either the group name of the group id.") query = self.query_factory( action="AuthorizeSecurityGroupIngress", creds=self.creds, endpoint=self.endpoint, other_params=parameters) d = query.submit() return d.addCallback(self.parser.truth_return)
def authorize_security_group( self, group_name=None, group_id=None, source_group_name="", source_group_owner_id="", ip_protocol="", from_port="", to_port="", cidr_ip="")
There are two ways to use C{authorize_security_group}: 1) associate an existing group (source group) with the one that you are targeting (group_name) with an authorization update; or 2) associate a set of IP permissions with the group you are targeting with an authorization update. @param group_name: The group you will be modifying with a new authorization. @param group_id: The id of the group you will be modifying with a new authorization. Optionally, the following parameters: @param source_group_name: Name of security group to authorize access to when operating on a user/group pair. @param source_group_owner_id: Owner of security group to authorize access to when operating on a user/group pair. If those parameters are not specified, then the following must be: @param ip_protocol: IP protocol to authorize access to when operating on a CIDR IP. @param from_port: Bottom of port range to authorize access to when operating on a CIDR IP. This contains the ICMP type if ICMP is being authorized. @param to_port: Top of port range to authorize access to when operating on a CIDR IP. This contains the ICMP code if ICMP is being authorized. @param cidr_ip: CIDR IP range to authorize access to when operating on a CIDR IP. @return: A C{Deferred} that will fire with a truth value for the success of the operation.
2.477539
2.454195
1.009512