code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def lstsq(cls, a, b):
    """Return the least-squares solution to the linear matrix equation a * beta = b.

    :param Matrix a: Design matrix with the values of the independent variables.
    :param Matrix b: Single-column matrix with the dependent-variable values.
    :return: Matrix beta solving beta = a^+ * b.
    :raise: Raises a :py:exc:`ValueError` if the row counts of a and b differ
        or if b has more than one column.
    """
    if a.get_height() != b.get_height():
        raise ValueError("Size of input matrices does not match")
    if b.get_width() != 1:
        raise ValueError("Matrix with dependent variable has more than 1 column")
    # If a^T * a were known to be regular, beta could be computed as
    # (a^T * a)^-1 * a^T * b instead; the pseudoinverse covers the
    # general (possibly singular) case.
    return a.pseudoinverse() * b
6.176115
5.274274
1.170989
# get the defined subset of error values errorValues = self._get_error_values(startingPercentage, endPercentage, startDate, endDate) errorValues = filter(lambda item: item is not None, errorValues) return float(sum(errorValues)) / float(len(errorValues))
def _calculate(self, startingPercentage, endPercentage, startDate, endDate)
This is the error calculation function that gets called by :py:meth:`BaseErrorMeasure.get_error`. Both parameters will be correct at this time. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :param float startDate: Epoch representing the start date used for error calculation. :param float endDate: Epoch representing the end date used in the error calculation. :return: Returns a float representing the error. :rtype: float
4.365399
4.112733
1.061435
def local_error(self, originalValue, calculatedValue):
    """Return the absolute percentage error between two single-value lists.

    :param list originalValue: List containing the values of the original data.
    :param list calculatedValue: List containing the values of the calculated
        TimeSeries that corresponds to originalValue.
    :return: Returns the error measure of the two given values, or None when
        the original value is zero (division would be undefined).
    :rtype: numeric
    """
    original = originalValue[0]
    calculated = calculatedValue[0]
    # a zero original value would force a division by zero
    if original == 0:
        return None
    return math.fabs((calculated - original) / float(original)) * 100.0
3.584049
4.796169
0.747273
# calculate the history values historyLength = self._historyLength historicMeans = [] append = historicMeans.append # not most optimized loop in case of calculation operations for startIdx in xrange(len(timeSeries) - historyLength - 1): value = 0 for idx in xrange(startIdx, startIdx + historyLength): value += abs(timeSeries[idx+1][1] - timeSeries[idx][1]) append(value / float(historyLength)) return historicMeans
def _get_historic_means(self, timeSeries)
Calculates the mean value for the history of the MeanAbsoluteScaledError. :param TimeSeries timeSeries: Original TimeSeries used to calculate the mean historic values. :return: Returns a list containing the historic means. :rtype: list
4.70893
4.618903
1.019491
def initialize(self, originalTimeSeries, calculatedTimeSeries):
    """Initialize the ErrorMeasure by calculating all local errors.

    :param TimeSeries originalTimeSeries: TimeSeries containing the original data.
    :param TimeSeries calculatedTimeSeries: TimeSeries containing calculated
        (smoothed or forecasted) data.
    :return: Return :py:const:`True` if the error could be calculated,
        :py:const:`False` otherwise, based on the minimalErrorCalculationPercentage.
    :rtype: boolean
    :raise: Raises a :py:exc:`StandardError` if the error measure is
        initialized multiple times.
    """
    # NOTE(review): xrange, StandardError and len(filter(...)) below are
    # Python 2 only constructs.
    # ErrorMeasure was already initialized.
    if 0 < len(self._errorValues):
        raise StandardError("An ErrorMeasure can only be initialized once.")
    # calculating the number of datapoints used within the history
    # (a float historyLength is interpreted as a percentage of the series)
    if isinstance(self._historyLength, float):
        self._historyLength = int((self._historyLength * len(originalTimeSeries)) / 100.0)
    # sort the TimeSeries to reduce the required comparison operations
    originalTimeSeries.sort_timeseries()
    calculatedTimeSeries.sort_timeseries()
    self._historicMeans = self._get_historic_means(originalTimeSeries)
    # Performance optimization: bind hot attributes to locals
    append = self._errorValues.append
    appendDates = self._errorDates.append
    local_error = self.local_error
    # the first historyLength + 1 entries only feed the historic means
    minCalcIdx = self._historyLength + 1
    # calculate all valid local errors
    for orgPair in originalTimeSeries[minCalcIdx:]:
        for calcIdx in xrange(minCalcIdx, len(calculatedTimeSeries)):
            calcPair = calculatedTimeSeries[calcIdx]
            # Skip values that can not be compared (timestamps differ)
            if calcPair[0] != orgPair[0]:
                continue
            append(local_error(orgPair[1:], calcPair[1:]))
            appendDates(orgPair[0])
    # return False, if the error cannot be calculated for enough datapoints
    if len(filter(lambda item: item is not None, self._errorValues)) < self._minimalErrorCalculationPercentage * len(originalTimeSeries):
        # reset state so a later initialize() attempt can start clean
        self._errorValues = []
        self._errorDates = []
        self._historicMeans = []
        return False
    return True
4.845156
4.066443
1.191497
def _calculate(self, startingPercentage, endPercentage, startDate, endDate):
    """Compute the MASE value (mean absolute deviation / historic mean).

    Called by :py:meth:`BaseErrorMeasure.get_error`; both parameters are
    validated by the caller.

    :param float startingPercentage: Start of the interval in [0.0, 100.0];
        e.g. 25.0 ignores the first 25% of all calculated errors.
    :param float endPercentage: End of the interval in [0.0, 100.0];
        e.g. 90.0 ignores the last 10% of all local errors.
    :param float startDate: Epoch representing the start date used for error calculation.
    :param float endDate: Epoch representing the end date used in the error calculation.
    :return: Returns a float representing the error.
    :rtype: float
    """
    # get the defined subset of error values
    errorValues = self._get_error_values(startingPercentage, endPercentage, startDate, endDate)
    # get the index of the historic mean matching the start of the interval
    if startDate is not None:
        possibleDates = filter(lambda date: date >= startDate, self._errorDates)
        # This piece of code is not required, because _get_error_values already ensured that the startDate
        # was correct. Otherwise it would have thrown an exception.
        #if 0 == len(possibleDates):
        #    raise ValueError("%s does not represent a valid startDate." % startDate)
        meanIdx = self._errorDates.index(min(possibleDates))
    else:
        meanIdx = int((startingPercentage * len(self._errorValues)) / 100.0)
    # mean absolute deviation of the selected error values
    mad = sum(errorValues) / float(len(errorValues))
    historicMean = self._historicMeans[meanIdx]
    return mad / historicMean
4.880261
4.706894
1.036833
def __train(self, n_clusters=4):
    """Calculate cluster centroids and standard deviations per observation.

    For each observation the features are normalised, per-feature standard
    deviations are derived, k-means centroids are computed and sorted
    (ordered by distance from an arbitrary -100, -100 point), and the result
    is appended to self.clusters as [observation, sd, centroids].

    General strategy: use numpy arrays for calculations, keep everything in
    float, convert arrays back to lists at the end.

    :param n_clusters: the number of clusters
    :type n_clusters: int
    """
    try:
        for obs in self.observations:
            # the last column is the observation id
            features, ids = self.__get_features_for_observation(observation=obs, last_column_is_id=True)
            normalised_data = whiten(features)
            # skip any rows that contain just zero values... they create nans
            first_safe_row = pdkit.utils.non_zero_index(normalised_data)
            # recover the per-feature scaling factor applied by whiten()
            sd = features[first_safe_row] / normalised_data[first_safe_row]
            # Calculate centroids and sort result
            centroids_array, _ = kmeans(normalised_data, n_clusters)
            sorted_centroids = pdkit.utils.centroid_sort(centroids_array)
            if not self.clusters:
                self.clusters = [[obs, sd.tolist(), sorted_centroids.tolist()]]
            else:
                self.clusters.append([obs, sd.tolist(), sorted_centroids.tolist()])
    except IOError as e:
        ierr = "({}): {}".format(e.errno, e.strerror)
        logging.error("Error training UPDRS, file not found, I/O error %s", ierr)
    except ValueError as verr:
        # BUG FIX: ValueError has no .message attribute on Python 3;
        # logging the exception object itself is portable.
        logging.error("Error training UPDRS ValueError ->%s", verr)
    except Exception:
        # BUG FIX: a bare except would also swallow SystemExit/KeyboardInterrupt.
        logging.error("Unexpected error on training UPDRS init: %s", sys.exc_info()[0])
5.361105
5.210816
1.028842
def get_single_score(self, point, centroids=None, sd=None):
    """Classify a single point against a group of centroids.

    The point is normalised by sd, then the index of the nearest centroid
    (Euclidean distance; the lowest-numbered cluster wins ties) is returned.

    :param point: the point to classify
    :type point: pandas.DataFrame
    :param centroids: the centroids
    :type centroids: np.array
    :param sd: the standard deviation used to normalise the point
    :type sd: np.array
    :return: the score (nearest-cluster index) for the given observation
    :rtype: int
    """
    normalised_point = array(point) / array(sd)
    # FIX: removed an 'observation_score' dict that was built here but never
    # used (dead local).
    distances = [euclidean(normalised_point, centroid) for centroid in centroids]
    # lowest index wins when distances tie, matching list.index semantics
    return int(distances.index(min(distances)))
4.641604
3.475746
1.335427
def write_model(self, filename='scores', filepath='', output_format='csv'):
    """Calculate the scores for all observations and write them to a file.

    If the output format is other than 'csv' it will print the scores instead.

    :param filename: the name to give to the file
    :type filename: string
    :param filepath: the path to save the file
    :type filepath: string
    :param output_format: the format of the file to write ('csv')
    :type output_format: string
    """
    scores_array = np.array([])
    for obs in self.observations:
        c, sd = self.__get_centroids_sd(obs)
        points, ids = self.__get_features_for_observation(observation=obs, last_column_is_id=True)
        b = np.array([])
        for p in points:
            b = np.append(b, [self.get_single_score(p, centroids=c, sd=sd)])
        # stack this observation's scores as a new row (first pass seeds the array)
        scores_array = np.vstack([scores_array, b]) if scores_array.size else b
    # prepend the id column; rows become (id, score_obs1, score_obs2, ...)
    # NOTE(review): ids comes from the last loop iteration - assumes all
    # observations share the same ids; confirm.
    scores_array = np.concatenate((ids[:, np.newaxis], scores_array.transpose()), axis=1)
    header = 'id,'+','.join(self.observations)
    try:
        if output_format == 'csv':
            filename = join(filepath, filename) + '.' + output_format
            np.savetxt(filename, scores_array, delimiter=",", fmt='%i', header=header,comments='')
        else:
            print(scores_array)
    except:
        # NOTE(review): bare except hides the real failure; consider narrowing
        logging.error("Unexpected error on writing output")
3.775319
3.929864
0.960674
def score(self, measurement, output_format='array'):
    """Score/classify a measurement against the trained knn clusters.

    :param measurement: the point to classify
    :type measurement: pandas.DataFrame
    :param output_format: the format to return the scores ('array' or 'str')
    :type output_format: string
    :return: the scores for the given test/point
    :rtype: np.array
    """
    scores = np.array([])
    for obs in self.observations:
        centroids, sd = self.__get_centroids_sd(obs)
        point, ids = self.__get_features_for_observation(data_frame=measurement,
                                                        observation=obs,
                                                        last_column_is_id=True)
        scores = np.append(scores, [self.get_single_score(point, centroids=centroids, sd=sd)], axis=0)
    int_scores = scores.astype(int)
    if output_format == 'array':
        return int_scores
    return np.array_str(int_scores)
4.956521
4.940455
1.003252
def download_file_part_run(download_context):
    """Download one byte range (chunk) of a project file. Runs in a background process.

    :param download_context: DownloadContext: params carry the destination
        directory, the file url data dict, the seek offset and the number of
        bytes to read.
    :return: str: 'ok' when the chunk download finished
    """
    destination_dir, file_url_data_dict, seek_amt, bytes_to_read = download_context.params
    project_file = ProjectFile(file_url_data_dict)
    local_path = project_file.get_local_path(destination_dir)
    # RetryChunkDownloader handles the actual transfer (with retries)
    retry_chunk_downloader = RetryChunkDownloader(project_file, local_path,
                                                  seek_amt, bytes_to_read,
                                                  download_context)
    retry_chunk_downloader.run()
    return 'ok'
4.660038
5.015727
0.929085
def run(self):
    """Download the contents of the specified project name or id to dest_directory."""
    files_to_download = self.get_files_to_download()
    total_size = self.get_total_files_size(files_to_download)
    # optional hook that runs before any bytes are transferred
    if self.file_download_pre_processor:
        self.run_preprocessor(files_to_download)
    self.try_create_dir(self.dest_directory)
    progress = ProgressPrinter(total_size, msg_verb='downloading')
    self.download_files(files_to_download, progress)
    progress.finished()
    warnings = self.check_warnings()
    if warnings:
        progress.show_warning(warnings)
4.250895
3.926461
1.082627
def run_preprocessor(self, files_to_download):
    """Run file_download_pre_processor for each file we are about to download.

    :param files_to_download: [ProjectFile]: files that will be downloaded
    """
    data_service = self.remote_store.data_service
    for project_file in files_to_download:
        self.file_download_pre_processor.run(data_service, project_file)
8.136446
5.402802
1.505968
def try_create_dir(self, path):
    """Create a directory if it doesn't exist.

    :param path: str path to the directory
    :raises ValueError: if a non-directory already exists with the same name
    """
    if not os.path.exists(path):
        os.mkdir(path)
    elif not os.path.isdir(path):
        # BUG FIX: the original constructed this ValueError but never raised it,
        # silently ignoring the name collision.
        raise ValueError("Unable to create directory:" + path +
                         " because a file already exists with the same name.")
3.244987
3.475358
0.933713
parent_paths = set([item.get_remote_parent_path() for item in self.file_urls]) if '' in parent_paths: parent_paths.remove('') return parent_paths
def _get_parent_remote_paths(self)
Get list of remote folders based on the list of all file urls :return: set([str]): set of remote folders (that contain files)
4.655141
3.683143
1.263904
def make_local_directories(self):
    """Create the local directories needed to download the files into dest_directory."""
    for remote_path in self._get_parent_remote_paths():
        self._assure_dir_exists(os.path.join(self.dest_directory, remote_path))
4.002293
3.239097
1.23562
def make_big_empty_files(self):
    """Pre-create empty placeholder files at their final sizes.

    Workers can then seek to their assigned offset and write their data.
    """
    for file_url in self.file_urls:
        target = file_url.get_local_path(self.dest_directory)
        with open(target, "wb") as placeholder:
            if file_url.size > 0:
                # write a single NUL at the last byte so the file has full size
                placeholder.seek(int(file_url.size) - 1)
                placeholder.write(b'\0')
3.104057
2.892442
1.073161
def make_ranges(self, file_url):
    """Divide file_url's size into byte ranges to be downloaded by workers.

    :param file_url: ProjectFileUrl: file url to download
    :return: [(int,int)]: array of inclusive (start, end) tuples
    """
    bytes_per_chunk = self.determine_bytes_per_chunk(file_url.size)
    ranges = []
    start = 0
    remaining = file_url.size
    while remaining > 0:
        # the final chunk may be smaller than bytes_per_chunk
        amount = min(bytes_per_chunk, remaining)
        ranges.append((start, start + amount - 1))
        start += amount
        remaining -= amount
    return ranges
2.620098
2.467422
1.061877
def determine_bytes_per_chunk(self, size):
    """Calculate the chunk size a worker should download.

    The last worker may download less than this depending on file size.

    :param size: int: total file size in bytes
    :return: int: byte size for a worker, never below self.bytes_per_chunk
    """
    workers = self.settings.config.download_workers
    # the setting may be absent, None, or the literal string 'None'
    if not workers or workers == 'None':
        workers = 1
    chunk = int(math.ceil(size / float(workers)))
    return max(chunk, self.bytes_per_chunk)
2.979064
2.957884
1.00716
def split_file_urls_by_size(self, size):
    """Partition file urls into large and small lists based on size.

    :param size: int: size (in bytes) that determines if a file is large or small
    :return: ([ProjectFileUrl],[ProjectFileUrl]): (large file urls, small file urls)
    """
    large_items = [f for f in self.file_urls if f.size >= size]
    small_items = [f for f in self.file_urls if f.size < size]
    return large_items, small_items
1.98581
1.929333
1.029273
def check_downloaded_files_sizes(self):
    """Verify every downloaded file has the expected size.

    Since we manually create the files this will only catch overruns.
    Raises ValueError if there is a problematic file.
    """
    for file_url in self.file_urls:
        local_path = file_url.get_local_path(self.dest_directory)
        self.check_file_size(file_url.size, local_path)
3.692647
3.411129
1.082529
def check_file_size(file_size, path):
    """Raise an error if we didn't get all of the file.

    :param file_size: int: expected size of this file
    :param path: str: path where we downloaded the file to
    :raises ValueError: when the on-disk size does not match file_size
    """
    actual_size = os.stat(path).st_size
    if actual_size != file_size:
        format_str = "Error occurred downloading {}. Got a file size {}. Expected file size:{}"
        raise ValueError(format_str.format(path, actual_size, file_size))
3.409915
3.356748
1.015839
def create_context(self, message_queue, task_id):
    """Create the DownloadContext handed to the background download process.

    :param message_queue: Queue: queue background process can send messages to us on
    :param task_id: int: id of this command's task so message will be routed correctly
    :return: DownloadContext: settings plus (dest dir, file url data, seek offset, read size)
    """
    chunk_params = (self.settings.dest_directory,
                    self.file_url.json_data,
                    self.seek_amt,
                    self.bytes_to_read)
    return DownloadContext(self.settings, chunk_params, message_queue, task_id)
10.142062
11.821382
0.857942
def get_url_and_headers_for_range(self, file_download):
    """Return url and headers for downloading part of a file, adding range headers.

    :param file_download: FileDownload: contains data about file we will download
    :return: str, dict: url to download and headers to use
    """
    headers = self.get_range_headers()
    if file_download.http_headers:
        headers.update(file_download.http_headers)
    # only insert a '/' when the url is relative (host-relative urls keep theirs)
    if file_download.url.startswith("/"):
        url = '{}{}'.format(file_download.host, file_download.url)
    else:
        url = '{}/{}'.format(file_download.host, file_download.url)
    return url, headers
3.021491
3.2278
0.936084
def download_chunk(self, url, headers):
    """Download part of a file and write it into our local file.

    :param url: str: URL to download this file chunk from
    :param headers: dict: headers (including range) used for the request
    :raises DownloadInconsistentError: when the signed url has expired
    """
    response = requests.get(url, headers=headers, stream=True)
    # expired signed urls come back with provider-specific status codes
    if response.status_code in (SWIFT_EXPIRED_STATUS_CODE, S3_EXPIRED_STATUS_CODE):
        raise DownloadInconsistentError(response.text)
    response.raise_for_status()
    self.actual_bytes_read = 0
    self._write_response_to_file(response)
    self._verify_download_complete()
3.82445
4.125464
0.927035
with open(self.local_path, 'r+b') as outfile: # open file for read/write (no truncate) outfile.seek(self.seek_amt) for chunk in response.iter_content(chunk_size=self.bytes_per_chunk): if chunk: # filter out keep-alive chunks outfile.write(chunk) self._on_bytes_read(len(chunk))
def _write_response_to_file(self, response)
Write response to the appropriate section of the file at self.local_path. :param response: requests.Response: response containing stream-able data
3.808146
3.516454
1.082951
self.actual_bytes_read += num_bytes_read if self.actual_bytes_read > self.bytes_to_read: raise TooLargeChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path) self.download_context.send_processed_message(num_bytes_read)
def _on_bytes_read(self, num_bytes_read)
Record our progress so we can validate that we receive all the data :param num_bytes_read: int: number of bytes we received as part of one chunk
3.936382
3.8145
1.031952
if self.actual_bytes_read > self.bytes_to_read: raise TooLargeChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path) elif self.actual_bytes_read < self.bytes_to_read: raise PartialChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path)
def _verify_download_complete(self)
Make sure we received all the data
2.626605
2.451084
1.07161
def optimize(self, timeSeries, forecastingMethods=None, startingPercentage=0.0, endPercentage=100.0):
    """Run the optimization of the given TimeSeries.

    :param TimeSeries timeSeries: TimeSeries instance that requires an optimized forecast.
    :param list forecastingMethods: List of forecastingMethods that will be used for optimization.
    :param float startingPercentage: Start of the error interval in [0.0, 100.0];
        e.g. 25.0 ignores the first 25% of all calculated errors.
    :param float endPercentage: End of the error interval in [0.0, 100.0];
        e.g. 90.0 ignores the last 10% of all local errors.
    :return: Returns the optimized forecasting method, the corresponding error
        measure and the forecasting method's parameters.
    :rtype: [BaseForecastingMethod, BaseErrorMeasure, Dictionary]
    :raise: Raises a :py:exc:`ValueError` if forecastingMethods is empty.
    """
    if not forecastingMethods:
        raise ValueError("forecastingMethods cannot be empty.")
    self._startingPercentage = startingPercentage
    self._endPercentage = endPercentage
    results = [
        [method] + self.optimize_forecasting_method(timeSeries, method)
        for method in forecastingMethods
    ]
    # pick the forecasting method with the smallest error
    best = min(results,
               key=lambda entry: entry[1].get_error(self._startingPercentage, self._endPercentage))
    # write the winning parameter values into the forecasting method instance
    for name, value in best[2].items():
        best[0].set_parameter(name, value)
    return best
2.429964
2.249363
1.08029
interval = forecastingMethod.get_interval(parameter) precision = 10**self._precison startValue = interval[0] endValue = interval[1] if not interval[2]: startValue += precision if interval[3]: endValue += precision while startValue < endValue: # fix the parameter precision parameterValue = startValue yield parameterValue startValue += precision
def _generate_next_parameter_value(self, parameter, forecastingMethod)
Generator for a specific parameter of the given forecasting method. :param string parameter: Name of the parameter the generator is used for. :param BaseForecastingMethod forecastingMethod: Instance of a ForecastingMethod. :return: Creates a generator used to iterate over possible parameters. :rtype: generator
5.224325
5.337299
0.978833
def optimize_forecasting_method(self, timeSeries, forecastingMethod):
    """Optimize the parameters for the given timeSeries and forecastingMethod.

    :param TimeSeries timeSeries: TimeSeries instance containing the original data.
    :param BaseForecastingMethod forecastingMethod: ForecastingMethod whose
        parameters are optimized.
    :return: Returns a list containing the smallest BaseErrorMeasure instance
        as defined in :py:meth:`BaseOptimizationMethod.__init__` and the
        forecasting method's parameter dictionary.
    :rtype: list
    """
    # enumerate all candidate values for every tuneable parameter
    tuneableParameters = forecastingMethod.get_optimizable_parameters()
    remainingParameters = []
    for tuneableParameter in tuneableParameters:
        values = list(self._generate_next_parameter_value(tuneableParameter, forecastingMethod))
        remainingParameters.append([tuneableParameter, values])
    # Collect the forecasting results for every parameter combination
    forecastingResults = self.optimization_loop(timeSeries, forecastingMethod, remainingParameters)
    # CLEANUP: removed a large block of commented-out debug print statements
    # that referenced GridSearchTest.inner_optimization_result_test.
    # Collect the parameters that resulted in the smallest error
    bestForecastingResult = min(forecastingResults,
                                key=lambda item: item[0].get_error(self._startingPercentage, self._endPercentage))
    return bestForecastingResult
5.09417
5.221167
0.975677
def optimization_loop(self, timeSeries, forecastingMethod, remainingParameters, currentParameterValues=None):
    """Recursively evaluate every parameter-value combination.

    :param TimeSeries timeSeries: TimeSeries instance that requires an optimized forecast.
    :param BaseForecastingMethod forecastingMethod: ForecastingMethod that is
        used to optimize the parameters.
    :param list remainingParameters: List of [name, values] pairs still to be
        evaluated. When this list is empty, the innermost loop is reached.
    :param dictionary currentParameterValues: The currently evaluated forecast
        parameter combination.
    :return: Returns a list of [BaseErrorMeasure, parameter-dict] results, one
        per successfully evaluated combination.
    :rtype: list
    """
    if currentParameterValues is None:
        currentParameterValues = {}
    # The innermost loop is reached: evaluate this parameter combination
    if 0 == len(remainingParameters):
        # set the forecasting parameters
        for parameter in currentParameterValues:
            forecastingMethod.set_parameter(parameter, currentParameterValues[parameter])
        # calculate the forecast
        forecast = timeSeries.apply(forecastingMethod)
        # create and initialize the ErrorMeasure
        error = self._errorClass(**self._errorMeasureKWArgs)
        # when the error could not be calculated, return an empty result
        if not error.initialize(timeSeries, forecast):
            return []
        # copy the dict: it is shared and mutated across the recursion
        return [[error, dict(currentParameterValues)]]
    # Otherwise take the next parameter and recurse over its values
    localParameter = remainingParameters[-1]
    localParameterName = localParameter[0]
    localParameterValues = localParameter[1]
    # BUG FIX: slice off the handled parameter ONCE, outside the value loop.
    # The original re-assigned remainingParameters = remainingParameters[:-1]
    # inside the loop, dropping one additional parameter per iterated value
    # and evaluating combinations with stale values.
    nextRemainingParameters = remainingParameters[:-1]
    results = []
    for value in localParameterValues:
        currentParameterValues[localParameterName] = value
        results += self.optimization_loop(timeSeries, forecastingMethod,
                                          nextRemainingParameters, currentParameterValues)
    return results
5.124355
4.619359
1.109322
def energy_data():
    """Connects to the database and loads Readings for device 8.

    :return: itty JSON Response containing the day-normalized TimeSeries.
    """
    # NOTE(review): cursor().execute() is called without an SQL statement -
    # this looks broken; confirm the intended query for device 8.
    cur = db.cursor().execute()
    original = TimeSeries()
    original.initialize_from_sql_cursor(cur)
    # fuse all readings of one day into a single summed value
    original.normalize("day", fusionMethod = "sum")
    return itty.Response(json.dumps(original, cls=PycastEncoder), content_type='application/json')
15.250303
15.36579
0.992484
def optimize(request):
    """Performs Holt Winters Parameter Optimization on the given post data.

    Expects the following values set in the post of the request:
        seasonLength - integer
        valuesToForecast - integer
        data - two dimensional array of [timestamp, value]

    :return: itty JSON Response with the optimal parameters, the original and
        smoothed series and the rounded error.
    """
    #Parse arguments
    seasonLength = int(request.POST.get('seasonLength', 6))
    valuesToForecast = int(request.POST.get('valuesToForecast', 0))
    data = json.loads(request.POST.get('data', []))
    original = TimeSeries.from_twodim_list(data)
    original.normalize("day")   #due to bug in TimeSeries.apply
    original.set_timeformat("%d.%m")
    #optimize smoothing
    hwm = HoltWintersMethod(seasonLength = seasonLength, valuesToForecast = valuesToForecast)
    gridSearch = GridSearch(SMAPE)
    optimal_forecasting, error, optimal_params = gridSearch.optimize(original, [hwm])
    #perform smoothing with the optimized method
    smoothed = optimal_forecasting.execute(original)
    smoothed.set_timeformat("%d.%m")
    result = {
        'params': optimal_params,
        'original': original,
        'smoothed': smoothed,
        'error': round(error.get_error(), 3)
    }
    return itty.Response(json.dumps(result, cls=PycastEncoder), content_type='application/json')
5.043784
4.190934
1.203499
def holtWinters(request):
    """Performs Holt Winters Smoothing on the given post data.

    Expects the following values set in the post of the request:
        smoothingFactor - float
        trendSmoothingFactor - float
        seasonSmoothingFactor - float
        seasonLength - integer
        valuesToForecast - integer
        data - two dimensional array of [timestamp, value]

    :return: itty JSON Response with the original and smoothed series and the
        rounded SMAPE error.
    """
    #Parse arguments
    smoothingFactor = float(request.POST.get('smoothingFactor', 0.2))
    trendSmoothingFactor = float(request.POST.get('trendSmoothingFactor', 0.3))
    seasonSmoothingFactor = float(request.POST.get('seasonSmoothingFactor', 0.4))
    seasonLength = int(request.POST.get('seasonLength', 6))
    valuesToForecast = int(request.POST.get('valuesToForecast', 0))
    data = json.loads(request.POST.get('data', []))
    #perform smoothing
    hwm = HoltWintersMethod(smoothingFactor = smoothingFactor,
                            trendSmoothingFactor = trendSmoothingFactor,
                            seasonSmoothingFactor = seasonSmoothingFactor,
                            seasonLength = seasonLength,
                            valuesToForecast = valuesToForecast)
    original = TimeSeries.from_twodim_list(data)
    original.set_timeformat("%d.%m")
    smoothed = hwm.execute(original)
    smoothed.set_timeformat("%d.%m")
    # measure the smoothing quality against the original series
    error = SMAPE()
    error.initialize(original, smoothed)
    #process the result
    result = {
        'original': original,
        'smoothed': smoothed,
        'error': round(error.get_error(), 3)
    }
    return itty.Response(json.dumps(result, cls=PycastEncoder), content_type='application/json')
2.835971
2.385315
1.188929
def parse(self, schema, strict=True):
    """Update args and rest, parsing the raw request arguments.

    :param schema: The Schema the parameters must be extracted with.
    :param strict: If True an error is raised when parameters not included in
        the schema are found; otherwise the extra parameters are saved in the
        rest attribute.
    :raises APIError: 400 UnknownParameter when strict and extra parameters exist
    """
    self.args, self.rest = schema.extract(self._raw_params)
    if strict and self.rest:
        unknown = self.rest.keys()[0]
        raise APIError(400, "UnknownParameter",
                       "The parameter %s is not recognized" % unknown)
6.412037
4.274856
1.499942
name_to_child = {} for child in children: name_to_child[child.name] = child return name_to_child
def _name_to_child_map(children)
Create a map of name to child based on a list. :param children [LocalFolder/LocalFile] list of children: :return: map child.name -> child
2.14625
2.384598
0.900047
def _update_remote_children(remote_parent, children):
    """Update remote_ids of children by matching names against the remote parent's children.

    :param remote_parent: RemoteProject/RemoteFolder who has children
    :param children: [LocalFolder,LocalFile]: children to set remote_ids on
    """
    name_to_child = _name_to_child_map(children)
    for remote_child in remote_parent.children:
        match = name_to_child.get(remote_child.name)
        # remote children without a local counterpart are simply skipped
        if match:
            match.update_remote_ids(remote_child)
2.73772
2.687929
1.018524
def _build_project_tree(path, followsymlinks, file_filter):
    """Build a tree of LocalFolder with children, or just a LocalFile, for a path.

    :param path: str: path to a file or directory
    :param followsymlinks: bool: should we follow symlinks when walking
    :param file_filter: FileFilter: include method returns True if we should include a file/folder
    :return: the top node of the tree, LocalFile or LocalFolder
    """
    if os.path.isfile(path):
        return LocalFile(path)
    return _build_folder_tree(os.path.abspath(path), followsymlinks, file_filter)
2.866142
2.338814
1.225468
def _build_folder_tree(top_abspath, followsymlinks, file_filter):
    """Build a tree of LocalFolder with children based on a path.

    :param top_abspath: str path to a directory to walk
    :param followsymlinks: bool should we follow symlinks when walking
    :param file_filter: FileFilter: include method returns True if we should include a file/folder
    :return: the top node of the tree LocalFolder
    """
    # abspath -> LocalFolder node created for it
    path_to_content = {}
    # child abspath -> parent abspath, recorded before the child is visited
    child_to_parent = {}
    ignore_file_patterns = IgnoreFilePatterns(file_filter)
    ignore_file_patterns.load_directory(top_abspath, followsymlinks)
    for dir_name, child_dirs, child_files in os.walk(top_abspath, followlinks=followsymlinks):
        abspath = os.path.abspath(dir_name)
        folder = LocalFolder(abspath)
        path_to_content[abspath] = folder
        # If we have a parent add us to it.
        parent_path = child_to_parent.get(abspath)
        if parent_path:
            path_to_content[parent_path].add_child(folder)
        remove_child_dirs = []
        for child_dir in child_dirs:
            # Record dir_name as the parent of child_dir so we can call add_child when get to it.
            abs_child_path = os.path.abspath(os.path.join(dir_name, child_dir))
            if ignore_file_patterns.include(abs_child_path, is_file=False):
                child_to_parent[abs_child_path] = abspath
            else:
                remove_child_dirs.append(child_dir)
        # prune ignored directories in place so os.walk skips descending into them
        for remove_child_dir in remove_child_dirs:
            child_dirs.remove(remove_child_dir)
        for child_filename in child_files:
            abs_child_filename = os.path.join(dir_name, child_filename)
            if ignore_file_patterns.include(abs_child_filename, is_file=True):
                folder.add_child(LocalFile(abs_child_filename))
    return path_to_content.get(top_abspath)
2.283543
2.283536
1.000003
abspath = os.path.abspath(path) self.children.append(_build_project_tree(abspath, self.followsymlinks, self.file_filter))
def add_path(self, path)
Add the path and any children files/folders to the list of content. :param path: str path to add
7.866468
8.848203
0.889047
if remote_project: self.remote_id = remote_project.id _update_remote_children(remote_project, self.children)
def update_remote_ids(self, remote_project)
Compare against remote_project saving off the matching uuids of of matching content. :param remote_project: RemoteProject project to compare against
5.140263
5.941441
0.865154
self.remote_id = remote_folder.id _update_remote_children(remote_folder, self.children)
def update_remote_ids(self, remote_folder)
Set remote id based on remote_folder and check children against this folder's children. :param remote_folder: RemoteFolder to compare against
6.054929
5.238685
1.155811
self.remote_id = remote_file.id hash_data = self.path_data.get_hash() if hash_data.matches(remote_file.hash_alg, remote_file.file_hash): self.need_to_send = False
def update_remote_ids(self, remote_file)
Based on a remote file try to assign a remote_id and compare hash info. :param remote_file: RemoteFile remote data pull remote_id from
5.433805
5.265839
1.031897
chunks = math.ceil(float(self.size) / float(bytes_per_chunk)) return max(chunks, 1)
def count_chunks(self, bytes_per_chunk)
Based on the size of the file determine how many chunks we will need to upload. For empty files 1 chunk is returned (DukeDS requires an empty chunk for empty files). :param bytes_per_chunk: int: how many bytes should chunks to spglit the file into :return: int: number of chunks that will need to be sent
3.371887
4.247654
0.793823
return self.alg == hash_alg and self.value == hash_value
def matches(self, hash_alg, hash_value)
Does our algorithm and hash value match the specified arguments. :param hash_alg: str: hash algorithm :param hash_value: str: hash value :return: boolean
4.703464
5.84169
0.805155
mime_type, encoding = mimetypes.guess_type(self.path) if not mime_type: mime_type = 'application/octet-stream' return mime_type
def mime_type(self)
Guess the mimetype of a file or 'application/octet-stream' if unable to guess. :return: str: mimetype
2.242494
2.365174
0.94813
chunk = None with open(self.path, 'rb') as infile: chunk = infile.read() return chunk
def read_whole_file(self)
Slurp the whole file into memory. Should only be used with relatively small files. :return: str: file contents
4.295221
5.227388
0.821676
with open(filename, "rb") as f: for chunk in iter(lambda: f.read(block_size), b""): self.hash.update(chunk)
def add_file(self, filename, block_size=4096)
Add an entire file to this hash. :param filename: str filename of the file to hash :param block_size: int size of chunks when reading the file
2.304287
2.172319
1.06075
# TODO - allow fetching of parent based on child? if parent is not None: route = copy(parent.route) else: route = {} if id is not None and cls.ID_NAME is not None: route[cls.ID_NAME] = id obj = cls(key=parent.key, route=route, config=parent.config) if data: # This is used in "get all" queries obj.data = data else: obj.fetch() return obj
def get(cls, parent=None, id=None, data=None)
Inherit info from parent and return new object
5.010429
4.981491
1.005809
if parent is not None: route = copy(parent.route) else: route = {} if cls.ID_NAME is not None: # Empty string triggers "get all resources" route[cls.ID_NAME] = "" base_obj = cls(key=parent.key, route=route, config=parent.config) start = datetime.now() r = requests.get( base_obj._url(), auth=(base_obj.key, ""), params=params) cls._delay_for_ratelimits(start) if r.status_code not in cls.TRUTHY_CODES: return base_obj._handle_request_exception(r) response = r.json() objects_data = response.get(base_obj.ENVELOPE or base_obj, []) return_objects = [] for data in objects_data: # Note that this approach does not get meta data return_objects.append( cls.get( parent=parent, id=data.get(cls.ID_NAME, data.get("id")), data=data)) return return_objects
def get_all(cls, parent=None, **params)
Perform a read request against the resource
4.703643
4.660083
1.009348
if self.ID_NAME not in self.route.keys() and "id" in self.data.keys(): self.route[self.ID_NAME] = self.data["id"] return self.config.BASE + self.PATH.format(**self.route)
def _url(self)
Get the URL for the resource
4.998929
4.480062
1.115817
try: data = request.json() except: data = {} code = request.status_code if code == requests.codes.bad: raise BadRequestException(response=data) if code == requests.codes.unauthorized: raise UnauthorizedException(response=data) if code == requests.codes.not_found: raise NotFoundException(response=data) # Generic error fallback request.raise_for_status()
def _handle_request_exception(request)
Raise the proper exception based on the response
2.778251
2.569216
1.081362
start = datetime.now() r = requests.get(self._url(), auth=(self.key, "")) self._delay_for_ratelimits(start) if r.status_code not in self.TRUTHY_CODES: return self._handle_request_exception(r) response = r.json() if self.ENVELOPE: self.data = response.get(self.ENVELOPE, {}) else: self.data = response # Move to separate function so it can be overrridden self._process_meta(response)
def fetch(self)
Perform a read request against the resource
5.066556
4.904255
1.033094
for key in self.META_ENVELOPES: self.meta[key] = response.get(key)
def _process_meta(self, response)
Process additional data sent in response
8.098641
6.539524
1.238414
start = datetime.now() r = requests.delete(self._url(), auth=(self.key, "")) self._delay_for_ratelimits(start) if r.status_code not in self.TRUTHY_CODES: return self._handle_request_exception(r)
def delete(self)
Delete the object
6.562651
6.092948
1.07709
start = datetime.now() r = requests.patch(self._url(), auth=(self.key, ""), data=kwargs) self._delay_for_ratelimits(start) if r.status_code not in self.TRUTHY_CODES: return self._handle_request_exception(r) # Refetch for safety. We could modify based on response, # but I'm afraid of some edge cases and marshal functions. self.fetch()
def patch(self, **kwargs)
Change attributes of the item
9.175203
9.000831
1.019373
if parent is None: raise Exception("Parent class is required") route = copy(parent.route) if cls.ID_NAME is not None: route[cls.ID_NAME] = "" obj = cls(key=parent.key, route=route, config=parent.config) start = datetime.now() response = requests.post(obj._url(), auth=(obj.key, ""), data=kwargs) cls._delay_for_ratelimits(start) if response.status_code not in cls.TRUTHY_CODES: return cls._handle_request_exception(response) # No envelope on post requests data = response.json() obj.route[obj.ID_NAME] = data.get("id", data.get(obj.ID_NAME)) obj.data = data return obj
def create(cls, parent=None, **kwargs)
Create an object and return it
4.836578
4.69683
1.029754
stop = datetime.now() duration_microseconds = (stop - start).microseconds if duration_microseconds < cls.REQUEST_TIME_MICROSECONDS: time.sleep((cls.REQUEST_TIME_MICROSECONDS - duration_microseconds) / MICROSECONDS_PER_SECOND)
def _delay_for_ratelimits(cls, start)
If request was shorter than max request time, delay
3.347155
3.083813
1.085395
logging.debug("{} data --> Loaded".format(format_file)) data_frame = load_data(filename, format_file) data_frame.sampling_rate = get_sampling_rate_from_timestamp(data_frame) return data_frame
def load_data(filename, format_file='cloudupdrs')
This is a general load data method where the format of data to load can be passed as a parameter, :param str filename: The path to load data from :param str format_file: format of the file. Default is CloudUPDRS ('cloudupdrs'). Set to 'mpower' for mpower data. :return DataFrame dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration data_frame.index is the datetime-like index
5.91402
6.372164
0.928102
region = AWSServiceRegion(access_key=self.key, secret_key=self.secret, uri=self.endpoint) query = self.query_factory(action=self.action, creds=region.creds, endpoint=region.ec2_endpoint, other_params=self.parameters) def write_response(response): print >> self.output, "URL: %s" % query.client.url print >> self.output print >> self.output, "HTTP status code: %s" % query.client.status print >> self.output print >> self.output, response def write_error(failure): if failure.check(AWSError): message = failure.value.original else: message = failure.getErrorMessage() if message.startswith("Error Message: "): message = message[len("Error Message: "):] print >> self.output, "URL: %s" % query.client.url print >> self.output if getattr(query.client, "status", None) is not None: print >> self.output, "HTTP status code: %s" % ( query.client.status,) print >> self.output print >> self.output, message if getattr(failure.value, "response", None) is not None: print >> self.output print >> self.output, failure.value.response deferred = query.submit() deferred.addCallback(write_response) deferred.addErrback(write_error) return deferred
def run(self)
Run the configured method and write the HTTP response status and text to the output stream.
2.755563
2.667144
1.033151
new_freq = np.round(1 / self.sampling_frequency, decimals=6) df_resampled = data_frame.resample(str(new_freq) + 'S').mean() # f = interpolate.interp1d(data_frame.td, data_frame.mag_sum_acc) # new_timestamp = np.arange(data_frame.td[0], data_frame.td[-1], 1.0 / self.sampling_frequency) # df_resampled.mag_sum_acc = f(new_timestamp) logging.debug("resample signal") df_resampled = df_resampled.interpolate(method='linear') get_sampling_rate_from_timestamp(df_resampled) # df_resampled['td'] = df_resampled.index - df_resampled.index[0] return df_resampled
def resample_signal(self, data_frame)
Convenience method for frequency conversion and resampling of data frame. Object must have a DatetimeIndex. After re-sampling, this methods interpolate the time magnitude sum acceleration values and the x,y,z values of the data frame acceleration :param data_frame: the data frame to resample :param str sampling_frequency: the sampling frequency. Default is 100Hz, as recommended by the author of the pilot study [1]
3.229437
3.117739
1.035826
b_f = lambda x: butter_lowpass_filter(x.values, self.sampling_frequency, cutoff=self.cutoff_frequency, order=self.filter_order) filtered_data_frame = data_frame.apply(b_f, 0) # we don't need to filter the time difference # filtered_data_frame.td = data_frame.td logging.debug("filtered whole dataframe!") # I need to fix this as I am losing some important information # one idea would be to look at where the sign changes (first and second peak) # and keep that information aswell. if centre: # de-mean filtered_data_frame -= filtered_data_frame.mean() for col in filtered_data_frame: first_zero_crossing = np.argwhere(filtered_data_frame[col] > 0)[0][0] filtered_data_frame[col][:first_zero_crossing] = 0 # No python3 support :( # if {*keep_cols}.issubset(filtered_data_frame.columns): for c in keep_cols: if c not in filtered_data_frame.columns: return filtered_data_frame[keep_cols] = data_frame[keep_cols] return filtered_data_frame
def filter_data_frame(self, data_frame, centre=False, keep_cols=['anno'])
This method filters a data frame signal as suggested in [1]. First step is to high pass filter the data frame using a butter Butterworth digital and analog filter (https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html). Then the method filter the data frame along one-dimension using a digital filter. (https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html) :param data_frame: the data frame :param str cutoff_frequency: The path to load data from :param str filter_order: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
4.831543
4.999995
0.96631
host = self.host.lower() if self.port is not None: host = "%s:%s" % (host, self.port) return host
def get_canonical_host(self)
Return the canonical host as for the Host HTTP header specification.
2.792481
2.288881
1.22002
parts = canonical_host.lower().split(":") self.host = parts[0] if len(parts) > 1 and parts[1]: self.port = int(parts[1]) else: self.port = None
def set_canonical_host(self, canonical_host)
Set host and port from a canonical host string as for the Host HTTP header specification.
2.302609
2.114942
1.088734
uri = "%s://%s%s" % (self.scheme, self.get_canonical_host(), self.path) return uri
def get_uri(self)
Get a URL representation of the service.
4.298233
3.604728
1.192388
key = str(cls) + str(args) + str(kwds) instance = self._clients.get(key) if purge_cache or not instance: instance = cls(*args, **kwds) self._clients[key] = instance return instance
def get_client(self, cls, purge_cache=False, *args, **kwds)
This is a general method for getting a client: if present, it is pulled from the cache; if not, a new one is instantiated and then put into the cache. This method should not be called directly, but rather by other client-specific methods (e.g., get_ec2_client).
2.326153
2.329419
0.998598
def _optimized(self, *args, **kwargs): if self.optimizationEnabled: class_name = self.__class__.__name__ module = self.__module__.replace("pycast", "pycastC") try: imported = __import__("%s.%s" % (module, class_name), globals(), locals(), [fn.__name__]) function = getattr(imported, fn.__name__) return function(self, *args, **kwargs) except ImportError: print "[WARNING] Could not enable optimization for %s, %s" % (fn.__name__, self) return fn(self, *args, **kwargs) else: return fn(self, *args, **kwargs) setattr(_optimized, "__name__", fn.__name__) setattr(_optimized, "__repr__", fn.__repr__) setattr(_optimized, "__str__", fn.__str__) setattr(_optimized, "__doc__", fn.__doc__) return _optimized
def optimized(fn)
Decorator that will call the optimized c++ version of a pycast function if available rather than theo original pycast function :param function fn: original pycast function :return: return the wrapped function :rtype: function
2.853405
2.583309
1.104554
# no forecasting methods provided if forecastingMethods is None or len(forecastingMethods) == 0: raise ValueError("forecastingMethods cannot be empty.")
def optimize(self, timeSeries, forecastingMethods=None, startingPercentage=0.0, endPercentage=100.0)
Runs the optimization on the given TimeSeries. :param TimeSeries timeSeries: TimeSeries instance that requires an optimized forecast. :param list forecastingMethods: List of forecastingMethods that will be used for optimization. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :return: Returns the optimized forecasting method with the smallest error. :rtype: (BaseForecastingMethod, Dictionary) :raise: Raises a :py:exc:`ValueError` ValueError if no forecastingMethods is empty.
3.590281
4.659488
0.770531
self.seen_paths.add(path) return self.filter.include(path)
def include_path(self, path)
Should this path be included based on the include_paths or exclude_paths. Keeps track of paths seen to allow finding unused filters. :param path: str: remote path to be filtered :return: bool: True if we should include the path
7.076277
7.246327
0.976533
return [path for path in self.filter.paths if path not in self.seen_paths]
def get_unused_paths(self)
Returns which include_paths or exclude_paths that were not used via include_path method. :return: [str] list of filtering paths that were not used.
8.226759
8.419603
0.977096
parent_dir = os.path.join(parent_path, '') child_dir = os.path.join(child_path, '') return child_dir.startswith(parent_dir)
def is_child(child_path, parent_path)
Is parent_path a parent(or grandparent) directory of child_path. :param child_path: str: remote file path :param parent_path: str: remote file path :return: bool: True when parent_path is child_path's parent
2.283979
2.715034
0.841234
return PathFilterUtil.is_child(path, some_path) or PathFilterUtil.is_child(some_path, path)
def parent_child_paths(path, some_path)
Is path a parent of some_path or some_path is a parent of path. :param path: str: remote file path :param some_path: str: remote file path :return: bool: True when they are parents
4.953079
5.081929
0.974645
def callback(scanner, name, method_class): if method_class.actions is not None: actions = method_class.actions else: actions = [name] if method_class.versions is not None: versions = method_class.versions else: versions = [None] for action in actions: for version in versions: scanner.registry.add(method_class, action=action, version=version) from venusian import attach attach(method_class, callback, category="method") return method_class
def method(method_class)
Decorator to use to mark an API method. When invoking L{Registry.scan} the classes marked with this decorator will be added to the registry. @param method_class: The L{Method} class to register.
3.091574
3.056488
1.011479
if not script: return '' # Convert line endings in case the file was created on Windows. script = script.replace(b'\r\n', b'\n') script = script.replace(b'\r', b'\n') if minify: if not can_minify: raise ValueError("No minifier is available") script = nudatus.mangle(script.decode('utf-8')).encode('utf-8') # Add header, pad to multiple of 16 bytes. data = b'MP' + struct.pack('<H', len(script)) + script # Padding with null bytes in a 2/3 compatible way data = data + (b'\x00' * (16 - len(data) % 16)) if len(data) > _MAX_SIZE: # 'MP' = 2 bytes, script length is another 2 bytes. raise ValueError("Python script must be less than 8188 bytes.") # Convert to .hex format. output = [':020000040003F7'] # extended linear address, 0x0003. addr = _SCRIPT_ADDR for i in range(0, len(data), 16): chunk = data[i:min(i + 16, len(data))] chunk = struct.pack('>BHB', len(chunk), addr & 0xffff, 0) + chunk checksum = (-(sum(bytearray(chunk)))) & 0xff hexline = ':%s%02X' % (strfunc(binascii.hexlify(chunk)).upper(), checksum) output.append(hexline) addr += 16 return '\n'.join(output)
def hexlify(script, minify=False)
Takes the byte content of a Python script and returns a hex encoded version of it. Based on the hexlify script in the microbit-micropython repository.
4.446006
4.485707
0.991149
lines = blob.split('\n')[1:] output = [] for line in lines: # Discard the address, length etc. and reverse the hexlification output.append(binascii.unhexlify(line[9:-2])) # Check the header is correct ("MP<size>") if (output[0][0:2].decode('utf-8') != u'MP'): return '' # Strip off header output[0] = output[0][4:] # and strip any null bytes from the end output[-1] = output[-1].strip(b'\x00') script = b''.join(output) try: result = script.decode('utf-8') return result except UnicodeDecodeError: # Return an empty string because in certain rare circumstances (where # the source hex doesn't include any embedded Python code) this # function may be passed in "raw" bytes from MicroPython. return ''
def unhexlify(blob)
Takes a hexlified script and turns it back into a string of Python code.
6.725163
6.526897
1.030377
if not runtime_hex: raise ValueError('MicroPython runtime hex required.') if not python_hex: return runtime_hex py_list = python_hex.split() runtime_list = runtime_hex.split() embedded_list = [] # The embedded list should be the original runtime with the Python based # hex embedded two lines from the end. embedded_list.extend(runtime_list[:-5]) embedded_list.extend(py_list) embedded_list.extend(runtime_list[-5:]) return '\n'.join(embedded_list) + '\n'
def embed_hex(runtime_hex, python_hex=None)
Given a string representing the MicroPython runtime hex, will embed a string representing a hex encoded Python script into it. Returns a string representation of the resulting combination. Will raise a ValueError if the runtime_hex is missing. If the python_hex is missing, it will return the unmodified runtime_hex.
3.734995
3.5953
1.038855
hex_lines = embedded_hex.split('\n') script_addr_high = hex((_SCRIPT_ADDR >> 16) & 0xffff)[2:].upper().zfill(4) script_addr_low = hex(_SCRIPT_ADDR & 0xffff)[2:].upper().zfill(4) start_script = None within_range = False # Look for the script start address for loc, val in enumerate(hex_lines): if val[0:9] == ':02000004': # Reached an extended address record, check if within script range within_range = val[9:13].upper() == script_addr_high elif within_range and val[0:3] == ':10' and \ val[3:7].upper() == script_addr_low: start_script = loc break if start_script: # Find the end of the script end_script = None for loc, val in enumerate(hex_lines[start_script:]): if val[9:41] == 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF': end_script = loc + start_script break # Pass the extracted hex through unhexlify return unhexlify('\n'.join( hex_lines[start_script - 1:end_script if end_script else -6])) return ''
def extract_script(embedded_hex)
Given a hex file containing the MicroPython runtime and an embedded Python script, will extract the original Python script. Returns a string containing the original embedded script.
3.54421
3.48889
1.015856
# Check what sort of operating system we're on. if os.name == 'posix': # 'posix' means we're on Linux or OSX (Mac). # Call the unix "mount" command to list the mounted volumes. mount_output = check_output('mount').splitlines() mounted_volumes = [x.split()[2] for x in mount_output] for volume in mounted_volumes: if volume.endswith(b'MICROBIT'): return volume.decode('utf-8') # Return a string not bytes. elif os.name == 'nt': # 'nt' means we're on Windows. def get_volume_name(disk_name): vol_name_buf = ctypes.create_unicode_buffer(1024) ctypes.windll.kernel32.GetVolumeInformationW( ctypes.c_wchar_p(disk_name), vol_name_buf, ctypes.sizeof(vol_name_buf), None, None, None, None, 0) return vol_name_buf.value # # In certain circumstances, volumes are allocated to USB # storage devices which cause a Windows popup to raise if their # volume contains no media. Wrapping the check in SetErrorMode # with SEM_FAILCRITICALERRORS (1) prevents this popup. # old_mode = ctypes.windll.kernel32.SetErrorMode(1) try: for disk in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': path = '{}:\\'.format(disk) # # Don't bother looking if the drive isn't removable # if ctypes.windll.kernel32.GetDriveTypeW(path) != 2: continue if os.path.exists(path) and \ get_volume_name(path) == 'MICROBIT': return path finally: ctypes.windll.kernel32.SetErrorMode(old_mode) else: # No support for unknown operating systems. raise NotImplementedError('OS "{}" not supported.'.format(os.name))
def find_microbit()
Returns a path on the filesystem that represents the plugged in BBC micro:bit that is to be flashed. If no micro:bit is found, it returns None. Works on Linux, OSX and Windows. Will raise a NotImplementedError exception if run on any other operating system.
3.490344
3.459714
1.008853
if not hex_file: raise ValueError('Cannot flash an empty .hex file.') if not path.endswith('.hex'): raise ValueError('The path to flash must be for a .hex file.') with open(path, 'wb') as output: output.write(hex_file.encode('ascii'))
def save_hex(hex_file, path)
Given a string representation of a hex file, this function copies it to the specified path thus causing the device mounted at that point to be flashed. If the hex_file is empty it will raise a ValueError. If the filename at the end of the path does not end in '.hex' it will raise a ValueError.
4.045062
3.598031
1.124243
# Check for the correct version of Python. if not ((sys.version_info[0] == 3 and sys.version_info[1] >= 3) or (sys.version_info[0] == 2 and sys.version_info[1] >= 7)): raise RuntimeError('Will only run on Python 2.7, or 3.3 and later.') # Grab the Python script (if needed). python_hex = '' if path_to_python: if not path_to_python.endswith('.py'): raise ValueError('Python files must end in ".py".') with open(path_to_python, 'rb') as python_script: python_hex = hexlify(python_script.read(), minify) elif python_script: python_hex = hexlify(python_script, minify) runtime = _RUNTIME # Load the hex for the runtime. if path_to_runtime: with open(path_to_runtime) as runtime_file: runtime = runtime_file.read() # Generate the resulting hex file. micropython_hex = embed_hex(runtime, python_hex) # Find the micro:bit. if not paths_to_microbits: found_microbit = find_microbit() if found_microbit: paths_to_microbits = [found_microbit] # Attempt to write the hex file to the micro:bit. if paths_to_microbits: for path in paths_to_microbits: hex_path = os.path.join(path, 'micropython.hex') print('Flashing Python to: {}'.format(hex_path)) save_hex(micropython_hex, hex_path) else: raise IOError('Unable to find micro:bit. Is it plugged in?')
def flash(path_to_python=None, paths_to_microbits=None, path_to_runtime=None, python_script=None, minify=False)
Given a path to or source of a Python file will attempt to create a hex file and then flash it onto the referenced BBC micro:bit. If the path_to_python & python_script are unspecified it will simply flash the unmodified MicroPython runtime onto the device. If used, the python_script argument should be a bytes object representing a UTF-8 encoded string. For example:: script = "from microbit import *\\ndisplay.scroll('Hello, World!')" uflash.flash(python_script=script.encode('utf-8')) If paths_to_microbits is unspecified it will attempt to find the device's path on the filesystem automatically. If the path_to_runtime is unspecified it will use the built in version of the MicroPython runtime. This feature is useful if a custom build of MicroPython is available. If the automatic discovery fails, then it will raise an IOError.
2.458745
2.432182
1.010921
with open(path_to_hex, 'r') as hex_file: python_script = extract_script(hex_file.read()) if output_path: with open(output_path, 'w') as output_file: output_file.write(python_script) else: print(python_script)
def extract(path_to_hex, output_path=None)
Given a path_to_hex file this function will attempt to extract the embedded script from it and save it either to output_path or stdout
1.912912
1.770984
1.080141
if not path: raise ValueError('Please specify a file to watch') print('Watching "{}" for changes'.format(path)) last_modification_time = os.path.getmtime(path) try: while True: time.sleep(1) new_modification_time = os.path.getmtime(path) if new_modification_time == last_modification_time: continue func(*args, **kwargs) last_modification_time = new_modification_time except KeyboardInterrupt: pass
def watch_file(path, func, *args, **kwargs)
Watch a file for changes by polling its last modification time. Call the provided function with *args and **kwargs upon modification.
1.847315
1.875796
0.984817
if not argv: argv = sys.argv[1:] parser = argparse.ArgumentParser(description=_HELP_TEXT) parser.add_argument('source', nargs='?', default=None) parser.add_argument('target', nargs='*', default=None) parser.add_argument('-r', '--runtime', default=None, help="Use the referenced MicroPython runtime.") parser.add_argument('-e', '--extract', action='store_true', help=("Extract python source from a hex file" " instead of creating the hex file."), ) parser.add_argument('-w', '--watch', action='store_true', help='Watch the source file for changes.') parser.add_argument('-m', '--minify', action='store_true', help='Minify the source') parser.add_argument('--version', action='version', version='%(prog)s ' + get_version()) args = parser.parse_args(argv) if args.extract: try: extract(args.source, args.target) except Exception as ex: error_message = "Error extracting {source}: {error!s}" print(error_message.format(source=args.source, error=ex), file=sys.stderr) sys.exit(1) elif args.watch: try: watch_file(args.source, flash, path_to_python=args.source, paths_to_microbits=args.target, path_to_runtime=args.runtime) except Exception as ex: error_message = "Error watching {source}: {error!s}" print(error_message.format(source=args.source, error=ex), file=sys.stderr) sys.exit(1) else: try: flash(path_to_python=args.source, paths_to_microbits=args.target, path_to_runtime=args.runtime, minify=args.minify) except Exception as ex: error_message = ( "Error flashing {source} to {target}{runtime}: {error!s}" ) source = args.source target = args.target if args.target else "microbit" if args.runtime: runtime = "with runtime {runtime}".format(runtime=args.runtime) else: runtime = "" print(error_message.format(source=source, target=target, runtime=runtime, error=ex), file=sys.stderr) sys.exit(1)
def main(argv=None)
Entry point for the command line tool 'uflash'. Will print help text if the optional first argument is "help". Otherwise it will ensure the optional first argument ends in ".py" (the source Python script). An optional second argument is used to reference the path to the micro:bit device. Any more arguments are ignored. Exceptions are caught and printed for the user.
2.078234
2.024126
1.026731
@functools.wraps(func) def wrap(*args, **kw): t0 = time() result = func(*args, **kw) t1 = time() print('func:%r args:[%r, %r] took: %2.4f sec' % (func.__name__, args, kw, t1 - t0)) return result return wrap
def timing(func)
Measure the execution time of a function call and print the result.
1.636351
1.603329
1.020596
@functools.wraps(func) def new_func(*args, **kwargs): if sys.version_info < (3, 0): warnings.warn_explicit( "Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, filename=func.func_code.co_filename, lineno=func.func_code.co_firstlineno + 1 ) else: warnings.warn_explicit( "Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, filename=func.__code__.co_filename, lineno=func.__code__.co_firstlineno + 1 ) return func(*args, **kwargs) return new_func
def deprecated(func)
Mark functions as deprecated. It will result in a warning being emitted when the function is used.
1.377066
1.411582
0.975548
path = 'units/currencies.csv' # always use slash in Python packages filepath = pkg_resources.resource_filename('mpu', path) with open(filepath, 'r') as fp: reader = csv.reader(fp, delimiter=',', quotechar='"') next(reader, None) # skip the headers for row in reader: is_currency = currency_str in [row[0], row[1], row[2]] if is_currency: entity = row[0] name = row[1] code = row[2] numeric_code = row[3] symbol = row[4] if len(row[5]) == 0: exponent = None else: exponent = int(row[5]) if len(row[6]) > 0: withdrawal_date = row[6] else: withdrawal_date = None subunits = row[7] return Currency(name=name, code=code, numeric_code=numeric_code, symbol=symbol, exponent=exponent, entities=[entity], withdrawal_date=withdrawal_date, subunits=subunits) raise ValueError('Could not find currency \'{}\''.format(currency_str))
def get_currency(currency_str)
Convert an identifier for a currency into a currency object. Parameters ---------- currency_str : str Returns ------- currency : Currency
2.445374
2.467288
0.991118
obj = cls(name=json['name'], code=json['code'], numeric_code=json['numeric_code'], symbol=json['symbol'], exponent=json['exponent'], entities=json['entities'], withdrawal_date=json['withdrawal_date'], subunits=json['subunits']) return obj
def from_json(cls, json)
Create a Currency object from a JSON dump.
3.81484
3.086177
1.236106
table = [[str(cell) for cell in row] for row in table] column_widths = [len(cell) for cell in table[0]] for row in table: for x, cell in enumerate(row): column_widths[x] = max(column_widths[x], len(cell)) formatters = [] for width in column_widths: formatters.append('{:>' + str(width) + '}') formatter = ' '.join(formatters) for row in table: print(formatter.format(*row))
def print_table(table)
Print as a table. I recommend looking at [`tabulate`](https://pypi.org/project/tabulate/). Parameters ---------- table : list Examples -------- >>> print_table([[1, 2, 3], [41, 0, 1]]) 1 2 3 41 0 1
1.864641
2.26993
0.821453
context, mail = parseaddr(potential_email_address) first_condition = len(context) == 0 and len(mail) != 0 dot_after_at = ('@' in potential_email_address and '.' in potential_email_address.split('@')[1]) return first_condition and dot_after_at
def is_email(potential_email_address)
Check if potential_email_address is a valid e-mail address. Please note that this function has no false-negatives but many false-positives. So if it returns that the input is not a valid e-mail adress, it certainly isn't. If it returns True, it might still be invalid. For example, the domain could not be registered. Parameters ---------- potential_email_address : str Returns ------- is_email : bool Examples -------- >>> is_email('') False >>> is_email('info@martin-thoma.de') True >>> is_email('info@math.martin-thoma.de') True >>> is_email('Martin Thoma <info@martin-thoma.de>') False >>> is_email('info@martin-thoma') False
4.645116
5.675994
0.818379
true = ['true', 't', '1', 'y', 'yes', 'enabled', 'enable', 'on'] false = ['false', 'f', '0', 'n', 'no', 'disabled', 'disable', 'off'] if string_.lower() in true: return True elif string_.lower() in false or (not default): return False else: raise ValueError('The value \'{}\' cannot be mapped to boolean.' .format(string_))
def str2bool(string_, default='raise')
Convert a string to a bool. Parameters ---------- string_ : str default : {'raise', False} Default behaviour if none of the "true" strings is detected. Returns ------- boolean : bool Examples -------- >>> str2bool('True') True >>> str2bool('1') True >>> str2bool('0') False
2.424822
2.996438
0.809235
if is_none(string_, default=False): return None else: return str2bool(string_, default)
def str2bool_or_none(string_, default='raise')
Convert a string to a bool or to None. Parameters ---------- string_ : str default : {'raise', False} Default behaviour if none of the "true" or "none" strings is detected. Returns ------- bool_or_none : bool or None Examples -------- >>> str2bool_or_none('True') True >>> str2bool_or_none('1') True >>> str2bool_or_none('0') False >>> str2bool_or_none('undefined')
4.060472
6.502307
0.624466
none = ['none', 'undefined', 'unknown', 'null', ''] if string_.lower() in none: return True elif not default: return False else: raise ValueError('The value \'{}\' cannot be mapped to none.' .format(string_))
def is_none(string_, default='raise')
Check if a string is equivalent to None. Parameters ---------- string_ : str default : {'raise', False} Default behaviour if none of the "None" strings is detected. Returns ------- is_none : bool Examples -------- >>> is_none('2', default=False) False >>> is_none('undefined', default=False) True
4.530413
6.032726
0.750973
path = 'data/iban.csv' # always use slash in Python packages filepath = pkg_resources.resource_filename('mpu', path) data = mpu.io.read(filepath, delimiter=';', format='dicts') potential_iban = potential_iban.replace(' ', '') # Remove spaces if len(potential_iban) < min([int(el['length']) for el in data]): return False country = None for element in data: if element['iban_fields'][:2] == potential_iban[:2]: country = element break if country is None: return False if len(potential_iban) != int(country['length']): return False if country['country_en'] == 'Germany': checksum_val = [value for field_type, value in zip(country['iban_fields'], potential_iban) if field_type == 'k'] checksum_val = ''.join(checksum_val) checksum_exp = _calculate_german_iban_checksum(potential_iban, country['iban_fields']) return checksum_val == checksum_exp return True
def is_iban(potential_iban)
Check if a string is a valid IBAN number. IBAN is described in ISO 13616-1:2007 Part 1. Spaces are ignored. # CODE 0 = always zero b = BIC or National Bank code c = Account number i = holder's kennitala (national identification number) k = IBAN check digits n = Branch number t = Account type x = National check digit or character Examples -------- >>> is_iban('DE89 3704 0044 0532 0130 00') True >>> is_iban('DE89 3704 0044 0532 0130 01') False
3.863632
4.184814
0.92325
number = [value for field_type, value in zip(iban_fields, iban) if field_type in ['b', 'c']] translate = {'0': '0', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5', '6': '6', '7': '7', '8': '8', '9': '9'} for i in range(ord('A'), ord('Z') + 1): translate[chr(i)] = str(i - ord('A') + 10) for val in 'DE00': translated = translate[val] for char in translated: number.append(char) number = sum(int(value) * 10**i for i, value in enumerate(number[::-1])) checksum = 98 - (number % 97) return str(checksum)
def _calculate_german_iban_checksum(iban, iban_fields='DEkkbbbbbbbbcccccccccc')
Calculate the checksam of the German IBAN format. Examples -------- >>> iban = 'DE41500105170123456789' >>> _calculate_german_iban_checksum(iban) '41'
2.496557
2.780509
0.897878
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(nb_bytes) < 1024.0: return '%3.1f %s%s' % (nb_bytes, unit, suffix) nb_bytes /= 1024.0 return '%.1f %s%s' % (nb_bytes, 'Yi', suffix)
def human_readable_bytes(nb_bytes, suffix='B')
Convert a byte number into a human readable format. Parameters ---------- nb_bytes : number suffix : str, optional (default: "B") Returns ------- size_str : str Examples -------- >>> human_readable_bytes(123) '123.0 B' >>> human_readable_bytes(1025) '1.0 KiB' >>> human_readable_bytes(9671406556917033397649423) '8.0 YiB'
1.286821
1.545149
0.832813
session = boto3.Session(profile_name=profile_name) conn = session.client('s3') keys = [] ret = conn.list_objects(Bucket=bucket) print(ret) if 'Contents' not in ret: return [] # Make this a generator in future and use the marker: # https://boto3.readthedocs.io/en/latest/reference/services/ # s3.html#S3.Client.list_objects for key in conn.list_objects(Bucket=bucket)['Contents']: keys.append('s3://' + bucket + '/' + key['Key']) return keys
def list_files(bucket, profile_name=None)
List up to 1000 files in a bucket. Parameters ---------- bucket : str profile_name : str, optional AWS profile Returns ------- s3_paths : List[str]
2.841208
3.006894
0.944898
session = boto3.Session(profile_name=profile_name) s3 = session.client('s3') bucket_name, key = _s3_path_split(source) s3_object = s3.get_object(Bucket=bucket_name, Key=key) body = s3_object['Body'] return body.read()
def s3_read(source, profile_name=None)
Read a file from an S3 source. Parameters ---------- source : str Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar' profile_name : str, optional AWS profile Returns ------- content : bytes Raises ------ botocore.exceptions.NoCredentialsError Botocore is not able to find your credentials. Either specify profile_name or add the environment variables AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN. See https://boto3.readthedocs.io/en/latest/guide/configuration.html
1.870812
2.236909
0.836338
if not isinstance(exists_strategy, ExistsStrategy): raise ValueError('exists_strategy \'{}\' is not in {}' .format(exists_strategy, ExistsStrategy)) session = boto3.Session(profile_name=profile_name) s3 = session.resource('s3') bucket_name, key = _s3_path_split(source) if os.path.isfile(destination): if exists_strategy is ExistsStrategy.RAISE: raise RuntimeError('File \'{}\' already exists.' .format(destination)) elif exists_strategy is ExistsStrategy.ABORT: return s3.Bucket(bucket_name).download_file(key, destination)
def s3_download(source, destination, exists_strategy=ExistsStrategy.RAISE, profile_name=None)
Copy a file from an S3 source to a local destination. Parameters ---------- source : str Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar' destination : str exists_strategy : {'raise', 'replace', 'abort'} What is done when the destination already exists? * `ExistsStrategy.RAISE` means a RuntimeError is raised, * `ExistsStrategy.REPLACE` means the local file is replaced, * `ExistsStrategy.ABORT` means the download is not done. profile_name : str, optional AWS profile Raises ------ botocore.exceptions.NoCredentialsError Botocore is not able to find your credentials. Either specify profile_name or add the environment variables AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN. See https://boto3.readthedocs.io/en/latest/guide/configuration.html
2.083845
2.164724
0.962638
session = boto3.Session(profile_name=profile_name) s3 = session.resource('s3') bucket_name, key = _s3_path_split(destination) with open(source, 'rb') as data: s3.Bucket(bucket_name).put_object(Key=key, Body=data)
def s3_upload(source, destination, profile_name=None)
Copy a file from a local source to an S3 destination. Parameters ---------- source : str destination : str Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar' profile_name : str, optional AWS profile
1.849584
2.40661
0.768544