code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
tabular_resource = self.__tabular_resources[resource_name] try: # Sorting fields in the same order as they appear in the schema # is necessary for tables to be converted into pandas.DataFrame fields = [] if 'schema' in tabular_resource.descriptor: fields = [f['name'] for f in tabular_resource.descriptor['schema']['fields']] elif len(tabular_resource.data) > 0: fields = tabular_resource.data[0].keys() return [order_columns_in_row(fields, row) for row in tabular_resource.data] except (SchemaValidationError, ValueError, TypeError) as e: warnings.warn( 'Unable to set column types automatically using {} schema. ' 'Data types may need to be adjusted manually. ' 'Error: {}'.format(resource_name, e)) self.__invalid_schemas.append(resource_name) file_format = tabular_resource.descriptor['format'] with Stream(io.BytesIO(self.raw_data[resource_name]), format=file_format, headers=1, scheme='stream', encoding='utf-8') as stream: return [OrderedDict(zip(stream.headers, row)) for row in stream.iter()]
def _load_table(self, resource_name)
Build table structure from resource data :param resource_name:
4.354889
4.479588
0.972163
try: import pandas except ImportError: raise RuntimeError('To enable dataframe support, ' 'run \'pip install datadotworld[pandas]\'') tabular_resource = self.__tabular_resources[resource_name] field_dtypes = fields_to_dtypes(tabular_resource.descriptor['schema']) try: return pandas.read_csv( path.join( self.__base_path, tabular_resource.descriptor['path']), dtype=field_dtypes['other'], parse_dates=list(field_dtypes['dates'].keys()), infer_datetime_format=True) except ValueError as e: warnings.warn( 'Unable to set data frame dtypes automatically using {} ' 'schema. Data types may need to be adjusted manually. ' 'Error: {}'.format(resource_name, e)) return pandas.read_csv( path.join( self.__base_path, tabular_resource.descriptor['path']))
def _load_dataframe(self, resource_name)
Build pandas.DataFrame from resource data Lazy load any optional dependencies in order to allow users to use package without installing pandas if so they wish. :param resource_name:
3.714784
3.790703
0.979972
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.append_records_with_http_info(owner, id, stream_id, body, **kwargs) else: (data) = self.append_records_with_http_info(owner, id, stream_id, body, **kwargs) return data
def append_records(self, owner, id, stream_id, body, **kwargs)
Append records to a stream. This endpoint appends JSON data to a stream associated with a dataset. Streams don't need to be created before you can append data to it. They will be created on-demand, the first time they are used. Multiple records can be appended at once by using JSON-L (`application/json-l`) as the request content type. Currently, data uploaded to a dataset via a stream is not immediatelly processed. Instead, it is processed automatically at least once a day or as a result of a \"Sync files\" endpoint invocation. Once processed, the contents of a stream will appear as part of the respective dataset as a `.jsonl` file (e.g. `my-stream` will produce a file named `my-stream.jsonl`). This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.append_records(owner, id, stream_id, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str stream_id: Stream unique identifier as defined by the user the first time the stream was used. Only lower case letters, numbers and dashes are allowed. Maximum length of 95 characters. (required) :param object body: (required) :return: None If the method is called asynchronously, returns the request thread.
1.386466
1.520571
0.911806
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_insight_with_http_info(project_owner, project_id, **kwargs) else: (data) = self.create_insight_with_http_info(project_owner, project_id, **kwargs) return data
def create_insight(self, project_owner, project_id, **kwargs)
Create an insight Create a new insight. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_insight(project_owner, project_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required) :param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required) :param InsightCreateRequest body: :return: CreateInsightResponse If the method is called asynchronously, returns the request thread.
1.438024
1.491673
0.964034
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_insight_with_http_info(project_owner, project_id, id, **kwargs) else: (data) = self.delete_insight_with_http_info(project_owner, project_id, id, **kwargs) return data
def delete_insight(self, project_owner, project_id, id, **kwargs)
Delete an insight Delete an existing insight. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_insight(project_owner, project_id, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required) :param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required) :param str id: Insight unique identifier. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.398256
1.469292
0.951653
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_insight_with_http_info(project_owner, project_id, id, **kwargs) else: (data) = self.get_insight_with_http_info(project_owner, project_id, id, **kwargs) return data
def get_insight(self, project_owner, project_id, id, **kwargs)
Retrieve an insight Retrieve an insight. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_insight(project_owner, project_id, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required) :param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required) :param str id: Insight unique identifier. (required) :return: InsightSummaryResponse If the method is called asynchronously, returns the request thread.
1.390843
1.464399
0.94977
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_insights_for_project_with_http_info(project_owner, project_id, **kwargs) else: (data) = self.get_insights_for_project_with_http_info(project_owner, project_id, **kwargs) return data
def get_insights_for_project(self, project_owner, project_id, **kwargs)
Get insights for project. Get insights for a project. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_insights_for_project(project_owner, project_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required) :param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required) :param str limit: Maximum number of items to include in a page of results. :param str next: Token from previous result page to be used when requesting a subsequent page. :return: PaginatedInsightResults If the method is called asynchronously, returns the request thread.
1.3654
1.425181
0.958054
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.replace_insight_with_http_info(project_owner, project_id, id, **kwargs) else: (data) = self.replace_insight_with_http_info(project_owner, project_id, id, **kwargs) return data
def replace_insight(self, project_owner, project_id, id, **kwargs)
Replace an insight Replace an insight. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.replace_insight(project_owner, project_id, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required) :param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required) :param str id: Insight unique identifier. (required) :param InsightPutRequest body: :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.396001
1.479292
0.943695
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_insight_with_http_info(project_owner, project_id, id, **kwargs) else: (data) = self.update_insight_with_http_info(project_owner, project_id, id, **kwargs) return data
def update_insight(self, project_owner, project_id, id, **kwargs)
Update an insight Update an insight. Note that only elements included in the request will be updated. All omitted elements will remain untouched. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_insight(project_owner, project_id, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required) :param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required) :param str id: Insight unique identifier. (required) :param InsightPatchRequest body: :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.415897
1.482951
0.954784
if summary is not None and len(summary) > 25000: raise ValueError("Invalid value for `summary`, length must be less than or equal to `25000`") if summary is not None and len(summary) < 0: raise ValueError("Invalid value for `summary`, length must be greater than or equal to `0`") self._summary = summary
def summary(self, summary)
Sets the summary of this DatasetPatchRequest. Long-form dataset summary (Markdown supported). :param summary: The summary of this DatasetPatchRequest. :type: str
1.798932
1.69703
1.060047
allowed_values = ["Public Domain", "PDDL", "CC-0", "CC-BY", "ODC-BY", "CC-BY-SA", "ODC-ODbL", "CC BY-NC", "CC BY-NC-SA", "Other"] if license not in allowed_values: raise ValueError( "Invalid value for `license` ({0}), must be one of {1}" .format(license, allowed_values) ) self._license = license
def license(self, license)
Sets the license of this DatasetPatchRequest. Dataset license. Find additional info for allowed values [here](https://data.world/license-help). :param license: The license of this DatasetPatchRequest. :type: str
2.385123
2.215008
1.076802
allowed_values = ["OPEN", "PRIVATE"] if visibility not in allowed_values: raise ValueError( "Invalid value for `visibility` ({0}), must be one of {1}" .format(visibility, allowed_values) ) self._visibility = visibility
def visibility(self, visibility)
Sets the visibility of this DatasetPatchRequest. Dataset visibility. `OPEN` if the dataset can be seen by any member of data.world. `PRIVATE` if the dataset can be seen by its owner and authorized collaborators. :param visibility: The visibility of this DatasetPatchRequest. :type: str
1.977667
1.891641
1.045477
if objective is not None and len(objective) > 120: raise ValueError("Invalid value for `objective`, length must be less than or equal to `120`") if objective is not None and len(objective) < 0: raise ValueError("Invalid value for `objective`, length must be greater than or equal to `0`") self._objective = objective
def objective(self, objective)
Sets the objective of this ProjectPutRequest. Short project objective. :param objective: The objective of this ProjectPutRequest. :type: str
1.973549
1.760921
1.120748
if not klass.swagger_types: return data kwargs = {} for attr, attr_type in iteritems(klass.swagger_types): if data is not None \ and klass.attribute_map[attr] in data \ and isinstance(data, (list, dict)): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) return instance
def __deserialize_model(self, data, klass)
Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object.
2.586603
2.760828
0.936894
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.add_linked_dataset_with_http_info(owner, id, linked_dataset_owner, linked_dataset_id, **kwargs) else: (data) = self.add_linked_dataset_with_http_info(owner, id, linked_dataset_owner, linked_dataset_id, **kwargs) return data
def add_linked_dataset(self, owner, id, linked_dataset_owner, linked_dataset_id, **kwargs)
Link dataset Add a linked dataset to a project. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.add_linked_dataset(owner, id, linked_dataset_owner, linked_dataset_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required) :param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required) :param str linked_dataset_owner: (required) :param str linked_dataset_id: (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.320603
1.397494
0.94498
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_project_with_http_info(owner, **kwargs) else: (data) = self.create_project_with_http_info(owner, **kwargs) return data
def create_project(self, owner, **kwargs)
Create a project Create a new project. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_project(owner, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required) :param ProjectCreateRequest body: :return: CreateProjectResponse If the method is called asynchronously, returns the request thread.
1.582842
1.626312
0.973271
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_project_with_http_info(owner, id, **kwargs) else: (data) = self.delete_project_with_http_info(owner, id, **kwargs) return data
def delete_project(self, owner, id, **kwargs)
Delete a project Permanently deletes a project and all data associated with it. This operation cannot be undone, although a new project may be created with the same id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_project(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required) :param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.474056
1.52138
0.968894
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_project_with_http_info(owner, id, **kwargs) else: (data) = self.get_project_with_http_info(owner, id, **kwargs) return data
def get_project(self, owner, id, **kwargs)
Retrieve a project Return details on a project. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_project(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required) :param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required) :return: ProjectSummaryResponse If the method is called asynchronously, returns the request thread.
1.47199
1.542798
0.954105
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.patch_project_with_http_info(owner, id, **kwargs) else: (data) = self.patch_project_with_http_info(owner, id, **kwargs) return data
def patch_project(self, owner, id, **kwargs)
Update a project Update an existing project. Note that only elements, files or linked datasets included in the request will be updated. All omitted elements, files or linked datasets will remain untouched. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.patch_project(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required) :param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required) :param ProjectPatchRequest body: :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.487815
1.571849
0.946538
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.remove_linked_dataset_with_http_info(owner, id, linked_dataset_owner, linked_dataset_id, **kwargs) else: (data) = self.remove_linked_dataset_with_http_info(owner, id, linked_dataset_owner, linked_dataset_id, **kwargs) return data
def remove_linked_dataset(self, owner, id, linked_dataset_owner, linked_dataset_id, **kwargs)
Unlink dataset Remove a linked dataset from a project. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.remove_linked_dataset(owner, id, linked_dataset_owner, linked_dataset_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required) :param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required) :param str linked_dataset_owner: (required) :param str linked_dataset_id: (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.316856
1.39599
0.943313
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.replace_project_with_http_info(owner, id, **kwargs) else: (data) = self.replace_project_with_http_info(owner, id, **kwargs) return data
def replace_project(self, owner, id, **kwargs)
Create / Replace a project Create a project with a given id or completely rewrite the project, including any previously added files or linked datasets, if one already exists with the given id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.replace_project(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required) :param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required) :param ProjectCreateRequest body: :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.472732
1.559954
0.944087
return _get_instance(profile, **kwargs). \ load_dataset(dataset_key, force_update=force_update, auto_update=auto_update)
def load_dataset(dataset_key, force_update=False, auto_update=False, profile='default', **kwargs)
Load a dataset from the local filesystem, downloading it from data.world first, if necessary. This function returns an object of type `LocalDataset`. The object allows access to metedata via it's `describe()` method and to all the data via three properties `raw_data`, `tables` and `dataframes`, all of which are mappings (dict-like structures). :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param force_update: Flag, indicating if a new copy of the dataset should be downloaded replacing any previously downloaded copy (Default value = False) :type force_update: bool :param auto_update: Flag, indicating that dataset be updated to the latest version :type auto_update: bool :param profile: Configuration profile (account) to use. (Default value = 'default') :type profile: str, optional :returns: The object representing the dataset :rtype: LocalDataset :raises RestApiError: If a server error occurs Examples -------- >>> import datadotworld as dw >>> dataset = dw.load_dataset('jonloyens/an-intro-to-dataworld-dataset') >>> list(dataset.dataframes) ['changelog', 'datadotworldbballstats', 'datadotworldbballteam']
4.252951
7.541389
0.563948
return _get_instance(profile, **kwargs).query(dataset_key, query, query_type=query_type, parameters=parameters, **kwargs)
def query(dataset_key, query, query_type='sql', profile='default', parameters=None, **kwargs)
Query an existing dataset :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param query: SQL or SPARQL query :type query: str :param query_type: The type of the query. Must be either 'sql' or 'sparql'. (Default value = 'sql') :type query_type: {'sql', 'sparql'}, optional :param parameters: parameters to the query - if SPARQL query, this should be a dict containing named parameters, if SQL query, then this should be a list containing positional parameters. Boolean values will be converted to xsd:boolean, Integer values to xsd:integer, and other Numeric values to xsd:decimal. anything else is treated as a String literal (Default value = None) :type parameters: query parameters, optional :param profile: Configuration profile (account) to use. (Default value = 'default') :type profile: str, optional :returns: Object containing the results of the query :rtype: Results :raises RuntimeError: If a server error occurs Examples -------- >>> import datadotworld as dw >>> results = dw.query( ... 'jonloyens/an-intro-to-dataworld-dataset', ... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` ' ... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name') >>> df = results.dataframe >>> df.shape (8, 6)
3.411622
6.866388
0.496858
return _get_instance(profile, **kwargs).open_remote_file( dataset_key, file_name, mode=mode, **kwargs)
def open_remote_file(dataset_key, file_name, profile='default', mode='w', **kwargs)
Open a remote file object that can be used to write to or read from a file in a data.world dataset :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param file_name: The name of the file to open :type file_name: str :param mode: the mode for the file - must be 'w', 'wb', 'r', or 'rb' - indicating read/write ('r'/'w') and optionally "binary" handling of the file data. (Default value = 'w') :type mode: str, optional :param chunk_size: size of chunked bytes to return when reading streamed bytes in 'rb' mode :type chunk_size: int, optional :param decode_unicode: whether to decode textual responses as unicode when returning streamed lines in 'r' mode :type decode_unicode: bool, optional :param profile: (Default value = 'default') :param **kwargs: Examples -------- >>> import datadotworld as dw >>> >>> # write a text file >>> with dw.open_remote_file('username/test-dataset', ... 'test.txt') as w: ... w.write("this is a test.") >>> >>> # write a jsonlines file >>> import json >>> with dw.open_remote_file('username/test-dataset', ... 'test.jsonl') as w: ... json.dump({'foo':42, 'bar':"A"}, w) ... w.write("\\n") ... json.dump({'foo':13, 'bar':"B"}, w) ... w.write("\\n") >>> >>> # write a csv file >>> import csv >>> with dw.open_remote_file('username/test-dataset', ... 'test.csv') as w: ... csvw = csv.DictWriter(w, fieldnames=['foo', 'bar']) ... csvw.writeheader() ... csvw.writerow({'foo':42, 'bar':"A"}) ... csvw.writerow({'foo':13, 'bar':"B"}) >>> >>> # write a pandas dataframe as a csv file >>> import pandas as pd >>> df = pd.DataFrame({'foo':[1,2,3,4],'bar':['a','b','c','d']}) >>> with dw.open_remote_file('username/test-dataset', ... 'dataframe.csv') as w: ... df.to_csv(w, index=False) >>> >>> # write a binary file >>> with dw.open_remote_file('username/test-dataset', >>> 'test.txt', mode='wb') as w: ... w.write(bytes([100,97,116,97,46,119,111,114,108,100])) >>> >>> # read a text file >>> with dw.open_remote_file('username/test-dataset', ... 'test.txt', mode='r') as r: ... print(r.read()) >>> >>> # read a csv file >>> with dw.open_remote_file('username/test-dataset', ... 'test.csv', mode='r') as r: ... csvr = csv.DictReader(r) ... for row in csvr: ... print(row['column a'], row['column b']) >>> >>> # read a binary file >>> with dw.open_remote_file('username/test-dataset', ... 'test', mode='rb') as r: ... bytes = r.read()
4.238397
6.815855
0.621844
if self._table is None: self._table = list(self._iter_rows()) return self._table
def table(self)
Build and cache a table from query results
5.268587
4.076241
1.292511
if self._dataframe is None: try: import pandas as pd except ImportError: raise RuntimeError('To enable dataframe support, ' 'run \'pip install datadotworld[pandas]\'') self._dataframe = pd.DataFrame.from_records(self._iter_rows(), coerce_float=True) return self._dataframe
def dataframe(self)
Build and cache a dataframe from query results
4.308845
3.802791
1.133074
datetime_types = ['date', 'datetime'] datetime_fields = { f['name']: _TABLE_SCHEMA_DTYPE_MAPPING.get(f['type'], 'object') for f in schema['fields'] if f['type'] in datetime_types} other_fields = { f['name']: _TABLE_SCHEMA_DTYPE_MAPPING.get(f['type'], 'object') for f in schema['fields'] if f['type'] not in datetime_types} return {'dates': datetime_fields, 'other': other_fields}
def fields_to_dtypes(schema)
Maps table schema fields types to dtypes separating date fields :param schema:
2.47736
2.574674
0.962203
if 'schema' in r.descriptor: r.descriptor['schema'] = _sanitize_schema(r.descriptor['schema']) return r
def sanitize_resource_schema(r)
Sanitize table schema for increased compatibility Up to version 0.9.0 jsontableschema did not support year, yearmonth and duration field types https://github.com/frictionlessdata/jsontableschema-py/pull/152 :param r:
3.983369
5.275082
0.755129
if ('results' in sparql_results_json and 'bindings' in sparql_results_json['results'] and len(sparql_results_json['results']['bindings']) > 0): # SQL results include metadata, SPARQL results don't result_metadata = sparql_results_json.get('metadata', []) metadata_names = [item['name'] for item in result_metadata] result_vars = sparql_results_json['head']['vars'] _verify_unique_names(result_vars, metadata_names) # SQL results require var name mapping, SPARQL results vars don't result_vars_mapping = dict(zip( result_vars, (metadata_names if metadata_names != [] else result_vars))) homogeneous_types = _get_types_from_sample( result_vars, sparql_results_json) fields = [] if homogeneous_types is None: for result_var in result_vars: fields.append({ 'name': result_vars_mapping.get(result_var), 'type': 'string' }) else: for index, var in enumerate(result_vars): field = { 'name': result_vars_mapping.get(var), 'type': infer_table_schema_type_from_rdf_term( homogeneous_types[var].get('type'), homogeneous_types[var].get('datatype') )} if 'datatype' in homogeneous_types.get(var): field['rdfType'] = homogeneous_types[var].get('datatype') term_metadata = (result_metadata[index] if result_metadata != [] else {}) if 'description' in term_metadata: field['description'] = term_metadata['description'] fields.append(field) return _sanitize_schema({'fields': fields}) elif 'boolean' in sparql_results_json: # ASK query results return {'fields': [{'name': 'boolean', 'type': 'boolean'}]} else: warn('Unable to infer table schema from empty query results') return None
def infer_table_schema(sparql_results_json)
Infer Table Schema from SPARQL results JSON SPARQL JSON Results Spec: https://www.w3.org/TR/2013/REC-sparql11-results-json-20130321 :param sparql_results_json: SPARQL JSON results of a query :returns: A schema descriptor for the inferred schema :rtype: dict (json)
2.828179
2.844773
0.994167
fields_idx = {f: pos for pos, f in enumerate(fields)} return OrderedDict(sorted(unordered_row.items(), key=lambda i: fields_idx[i[0]]))
def order_columns_in_row(fields, unordered_row)
Ensure columns appear in the same order for every row in table :param fields: :param unordered_row:
3.80702
4.507975
0.844508
total_bindings = len(sparql_results_json['results']['bindings']) homogeneous_types = {} for result_var in result_vars: var_types = set() var_datatypes = set() for i in range(0, min(total_bindings, 10)): binding = sparql_results_json['results']['bindings'][i] rdf_term = binding.get(result_var) if rdf_term is not None: # skip missing values var_types.add(rdf_term.get('type')) var_datatypes.add(rdf_term.get('datatype')) if len(var_types) > 1 or len(var_datatypes) > 1: return None # Heterogeneous types else: homogeneous_types[result_var] = { 'type': var_types.pop() if var_types else None, 'datatype': var_datatypes.pop() if var_datatypes else None } return homogeneous_types
def _get_types_from_sample(result_vars, sparql_results_json)
Return types if homogenous within sample Compare up to 10 rows of results to determine homogeneity. DESCRIBE and CONSTRUCT queries, for example, :param result_vars: :param sparql_results_json:
2.053899
2.061246
0.996436
with open(self._config_file_path, 'w') as file: self._config_parser.write(file)
def save(self)
Persist config changes
4.136896
3.460712
1.195389
for i in seq: obj = supplier_func(i) if obj is not None: return obj return None
def _first_not_none(seq, supplier_func)
Applies supplier_func to each element in seq, returns 1st not None :param seq: Sequence of object :type seq: iterable :param supplier_func: Function that extracts the desired value from elements in seq :type supplier_func: function
4.094953
3.930348
1.04188
if ctx.obj is None: ctx.obj = {} ctx.obj['profile'] = profile pass
def cli(ctx, profile)
dw commands support working with multiple data.world accounts \b Use a different <profile> value for each account. In the absence of a <profile>, 'default' will be used.
3.167611
4.64059
0.682588
config = obj.get('config') or FileConfig(obj['profile']) config.auth_token = token config.save()
def configure(obj, token)
Use this command to configure API tokens
7.579834
7.788216
0.973244
if request_entity is not None and len(request_entity) > 10000: raise ValueError("Invalid value for `request_entity`, length must be less than or equal to `10000`") self._request_entity = request_entity
def request_entity(self, request_entity)
Sets the request_entity of this FileSourceCreateOrUpdateRequest. :param request_entity: The request_entity of this FileSourceCreateOrUpdateRequest. :type: str
2.270246
2.020044
1.12386
if title is None: raise ValueError("Invalid value for `title`, must not be `None`") if title is not None and len(title) > 128: raise ValueError("Invalid value for `title`, length must be less than or equal to `128`") if title is not None and len(title) < 1: raise ValueError("Invalid value for `title`, length must be greater than or equal to `1`") self._title = title
def title(self, title)
Sets the title of this InsightPutRequest. Insight title. :param title: The title of this InsightPutRequest. :type: str
1.578904
1.410844
1.11912
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.sparql_get_with_http_info(owner, id, query, **kwargs) else: (data) = self.sparql_get_with_http_info(owner, id, query, **kwargs) return data
def sparql_get(self, owner, id, query, **kwargs)
SPARQL query (via GET) This endpoint executes SPARQL queries against a dataset or data project. SPARQL results are available in a variety of formats. By default, `application/sparql-results+json` will be returned. Set the `Accept` header to one of the following values in accordance with your preference: - `application/sparql-results+xml` - `application/sparql-results+json` - `application/rdf+json` - `application/rdf+xml` - `text/csv` - `text/tab-separated-values` New to SPARQL? Check out data.world’s[SPARQL tutorial](https://docs.data.world/tutorials/sparql/). This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sparql_get(owner, id, query, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str query: (required) :return: None If the method is called asynchronously, returns the request thread.
1.464318
1.531932
0.955864
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.sparql_post_with_http_info(owner, id, query, **kwargs) else: (data) = self.sparql_post_with_http_info(owner, id, query, **kwargs) return data
def sparql_post(self, owner, id, query, **kwargs)
SPARQL query This endpoint executes SPARQL queries against a dataset or data project. SPARQL results are available in a variety of formats. By default, `application/sparql-results+json` will be returned. Set the `Accept` header to one of the following values in accordance with your preference: - `application/sparql-results+xml` - `application/sparql-results+json` - `application/rdf+json` - `application/rdf+xml` - `text/csv` - `text/tab-separated-values` New to SPARQL? Check out data.world's [SPARQL tutorial](https://docs.data.world/tutorials/sparql/). This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sparql_post(owner, id, query, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str query: (required) :return: None If the method is called asynchronously, returns the request thread.
1.448258
1.53769
0.94184
try: return self._datasets_api.get_dataset( *(parse_dataset_key(dataset_key))).to_dict() except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def get_dataset(self, dataset_key)
Retrieve an existing dataset definition This method retrieves metadata about an existing :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :returns: Dataset definition, with all attributes :rtype: dict :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> intro_dataset = api_client.get_dataset( ... 'jonloyens/an-intro-to-dataworld-dataset') # doctest: +SKIP >>> intro_dataset['title'] # doctest: +SKIP 'An Intro to data.world Dataset'
7.516859
9.532334
0.788564
request = self.__build_dataset_obj( lambda: _swagger.DatasetCreateRequest( title=kwargs.get('title'), visibility=kwargs.get('visibility')), lambda name, url, expand_archive, description, labels: _swagger.FileCreateRequest( name=name, source=_swagger.FileSourceCreateRequest( url=url, expand_archive=expand_archive), description=description, labels=labels), kwargs) try: (_, _, headers) = self._datasets_api.create_dataset_with_http_info( owner_id, request, _return_http_data_only=False) if 'Location' in headers: return headers['Location'] except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def create_dataset(self, owner_id, **kwargs)
Create a new dataset :param owner_id: Username of the owner of the new dataset :type owner_id: str :param title: Dataset title (will be used to generate dataset id on creation) :type title: str :param description: Dataset description :type description: str, optional :param summary: Dataset summary markdown :type summary: str, optional :param tags: Dataset tags :type tags: list, optional :param license: Dataset license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Dataset visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :returns: Newly created dataset key :rtype: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> url = 'http://www.acme.inc/example.csv' >>> api_client.create_dataset( ... 'username', title='Test dataset', visibility='PRIVATE', ... license='Public Domain', ... files={'dataset.csv':{'url': url}}) # doctest: +SKIP
4.44013
4.785564
0.927817
request = self.__build_dataset_obj( lambda: _swagger.DatasetPatchRequest(), lambda name, url, expand_archive, description, labels: _swagger.FileCreateOrUpdateRequest( name=name, source=_swagger.FileSourceCreateOrUpdateRequest( url=url, expand_archive=expand_archive) if url is not None else None, description=description, labels=labels), kwargs) owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.patch_dataset(owner_id, dataset_id, request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def update_dataset(self, dataset_key, **kwargs)
Update an existing dataset :param description: Dataset description :type description: str, optional :param summary: Dataset summary markdown :type summary: str, optional :param tags: Dataset tags :type tags: list, optional :param license: Dataset license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Dataset visibility :type visibility: {'OPEN', 'PRIVATE'}, optional :param files: File names and source URLs to add or update :type files: dict, optional :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_dataset( ... 'username/test-dataset', ... tags=['demo', 'datadotworld']) # doctest: +SKIP
4.799331
5.312483
0.903406
request = self.__build_dataset_obj( lambda: _swagger.DatasetPutRequest( title=kwargs.get('title'), visibility=kwargs.get('visibility') ), lambda name, url, expand_archive, description, labels: _swagger.FileCreateRequest( name=name, source=_swagger.FileSourceCreateRequest( url=url, expand_archive=expand_archive), description=description, labels=labels), kwargs) owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.replace_dataset(owner_id, dataset_id, request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def replace_dataset(self, dataset_key, **kwargs)
Replace an existing dataset *This method will completely overwrite an existing dataset.* :param description: Dataset description :type description: str, optional :param summary: Dataset summary markdown :type summary: str, optional :param tags: Dataset tags :type tags: list, optional :param license: Dataset license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Dataset visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File names and source URLs to add or update :type files: dict, optional :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.replace_dataset( ... 'username/test-dataset', ... visibility='PRIVATE', license='Public Domain', ... description='A better description') # doctest: +SKIP
5.041004
5.536165
0.910559
owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.delete_dataset(owner_id, dataset_id) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def delete_dataset(self, dataset_key)
Deletes a dataset and all associated data :params dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.delete_dataset( ... 'username/dataset') # doctest: +SKIP
4.476936
5.416165
0.826588
file_requests = [_swagger.FileCreateOrUpdateRequest( name=file_name, source=_swagger.FileSourceCreateOrUpdateRequest( url=file_info['url'], expand_archive=file_info.get('expand_archive', False)), description=file_info.get('description'), labels=file_info.get('labels'), ) for file_name, file_info in files.items()] owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.add_files_by_source( owner_id, dataset_id, _swagger.FileBatchUpdateRequest(files=file_requests)) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def add_files_via_url(self, dataset_key, files={})
Add or update dataset files linked to source URLs :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param files: Dict containing the name of files and metadata Uses file name as a dict containing File description, labels and source URLs to add or update (Default value = {}) *description and labels are optional.* :type files: dict :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> url = 'http://www.acme.inc/example.csv' >>> api_client = dw.api_client() >>> api_client.add_files_via_url( ... 'username/test-dataset', ... {'example.csv': { ... 'url': url, ... 'labels': ['raw data'], ... 'description': 'file description'}}) # doctest: +SKIP
3.633158
3.705178
0.980562
try: self._datasets_api.sync(*(parse_dataset_key(dataset_key))) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def sync_files(self, dataset_key)
Trigger synchronization process to update all dataset files linked to source URLs. :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.sync_files('username/test-dataset') # doctest: +SKIP
10.771892
13.707026
0.785866
owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._uploads_api.upload_files(owner_id, dataset_id, files, **kwargs) if files_metadata: self.update_dataset(dataset_key, files=files_metadata) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def upload_files(self, dataset_key, files, files_metadata={}, **kwargs)
Upload dataset files :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param files: The list of names/paths for files stored in the local filesystem :type files: list of str :param expand_archives: Boolean value to indicate files should be expanded upon upload :type expand_archive: bool optional :param files_metadata: Dict containing the name of files and metadata Uses file name as a dict containing File description, labels and source URLs to add or update :type files_metadata: dict optional :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.upload_files( ... 'username/test-dataset', ... ['/my/local/example.csv']) # doctest: +SKIP
3.790221
5.302504
0.714798
owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._uploads_api.upload_file(owner_id, dataset_id, name, **kwargs) if file_metadata: self.update_dataset(dataset_key, files=file_metadata) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def upload_file(self, dataset_key, name, file_metadata={}, **kwargs)
Upload one file to a dataset :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param name: Name/path for files stored in the local filesystem :type name: str :param expand_archives: Boolean value to indicate files should be expanded upon upload :type expand_archive: bool optional :param files_metadata: Dict containing the name of files and metadata Uses file name as a dict containing File description, labels and source URLs to add or update :type files_metadata: dict optional :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.upload_file( ... 'username/test-dataset', ... 'example.csv') # doctest: +SKIP
3.717373
5.273297
0.704943
owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.delete_files_and_sync_sources( owner_id, dataset_id, names) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def delete_files(self, dataset_key, names)
Delete dataset file(s) :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param names: The list of names for files to be deleted :type names: list of str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.delete_files( ... 'username/test-dataset', ['example.csv']) # doctest: +SKIP
5.439777
7.337428
0.741374
if path.isdir(dest_dir): raise ValueError('dest_dir must be a new directory, ' 'but {} already exists'.format(dest_dir)) owner_id, dataset_id = parse_dataset_key(dataset_key) url = "{0}://{1}/datapackage/{2}/{3}".format( self._protocol, self._download_host, owner_id, dataset_id) headers = { 'User-Agent': _user_agent(), 'Authorization': 'Bearer {0}'.format(self._config.auth_token) } try: response = requests.get(url, headers=headers, stream=True) response.raise_for_status() except requests.RequestException as e: raise RestApiError(cause=e) unzip_dir = path.join(self._config.tmp_dir, str(uuid.uuid4())) os.makedirs(unzip_dir) zip_file = path.join(unzip_dir, 'dataset.zip') with open(zip_file, 'wb') as f: for data in response.iter_content(chunk_size=4096): f.write(data) zip_obj = zipfile.ZipFile(zip_file) zip_obj.extractall(path=unzip_dir) # Find where datapackage.json is within expanded files unzipped_descriptor = glob.glob( '{}/**/datapackage.json'.format(unzip_dir)) if not unzipped_descriptor: raise RuntimeError( 'Zip file did not contain a datapackage manifest.') unzipped_dir = path.dirname(unzipped_descriptor[0]) shutil.move(unzipped_dir, dest_dir) shutil.rmtree(unzip_dir, ignore_errors=True) return path.join(dest_dir, 'datapackage.json')
def download_datapackage(self, dataset_key, dest_dir)
Download and unzip a dataset's datapackage :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param dest_dir: Directory under which datapackage should be saved :type dest_dir: str or path :returns: Location of the datapackage descriptor (datapackage.json) in the local filesystem :rtype: path :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> datapackage_descriptor = api_client.download_datapackage( ... 'jonloyens/an-intro-to-dataworld-dataset', ... '/tmp/test') # doctest: +SKIP >>> datapackage_descriptor # doctest: +SKIP '/tmp/test/datapackage.json'
2.501602
2.624119
0.953311
try: return self._user_api.get_user_data().to_dict() except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def get_user_data(self)
Retrieve data for authenticated user :returns: User data, with all attributes :rtype: dict :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> user_data = api_client.get_user_data() # doctest: +SKIP >>> user_data[display_name] # doctest: +SKIP 'Name User'
7.237841
8.646677
0.837066
try: return self._user_api.fetch_contributing_projects( **kwargs).to_dict() except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def fetch_contributing_projects(self, **kwargs)
Fetch projects that the currently authenticated user has access to :returns: Authenticated user projects :rtype: dict :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> user_projects = ... api_client.fetch_contributing_projects() # doctest: +SKIP {'count': 0, 'records': [], 'next_page_token': None}
6.950372
8.377724
0.829625
api_client = self._build_api_client( default_mimetype_header_accept=desired_mimetype) sql_api = kwargs.get('sql_api_mock', _swagger.SqlApi(api_client)) owner_id, dataset_id = parse_dataset_key(dataset_key) try: response = sql_api.sql_post( owner_id, dataset_id, query, _preload_content=False, **kwargs) return six.BytesIO(response.data) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def sql(self, dataset_key, query, desired_mimetype='application/json', **kwargs)
Executes SQL queries against a dataset via POST :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param query: SQL query :type query: str :param include_table_schema: Flags indicating to include table schema in the response :type include_table_schema: bool :returns: file object that can be used in file parsers and data handling modules. :rtype: file-like object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.sql('username/test-dataset', 'query') # doctest: +SKIP
4.591908
5.102289
0.89997
api_client = self._build_api_client( default_mimetype_header_accept=desired_mimetype) sparql_api = kwargs.get('sparql_api_mock', _swagger.SparqlApi(api_client)) owner_id, dataset_id = parse_dataset_key(dataset_key) try: response = sparql_api.sparql_post( owner_id, dataset_id, query, _preload_content=False, **kwargs) return six.BytesIO(response.data) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def sparql(self, dataset_key, query, desired_mimetype='application/sparql-results+json', **kwargs)
Executes SPARQL queries against a dataset via POST :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param query: SPARQL query :type query: str :returns: file object that can be used in file parsers and data handling modules. :rtype: file object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.sparql_post('username/test-dataset', ... query) # doctest: +SKIP
4.573383
4.695522
0.973988
owner_id, dataset_id = parse_dataset_key(dataset_key) try: return self._download_api.download_dataset(owner_id, dataset_id) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def download_dataset(self, dataset_key)
Return a .zip containing all files within the dataset as uploaded. :param dataset_key : Dataset identifier, in the form of owner/id :type dataset_key: str :returns: .zip file contain files within dataset :rtype: file object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.download_dataset( ... 'username/test-dataset') # doctest: +SKIP
4.566533
5.748378
0.794404
owner_id, dataset_id = parse_dataset_key(dataset_key) try: return self._streams_api.append_records(owner_id, dataset_id, stream_id, body) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def append_records(self, dataset_key, stream_id, body)
Append records to a stream. :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param stream_id: Stream unique identifier. :type stream_id: str :param body: Object body :type body: obj :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.append_records('username/test-dataset','streamId', ... {'content':'content'}) # doctest: +SKIP
4.241158
5.839172
0.726329
try: owner_id, project_id = parse_dataset_key(project_key) return self._projects_api.get_project(owner_id, project_id).to_dict() except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def get_project(self, project_key)
Retrieve an existing project This method retrieves metadata about an existing project :param project_key: Project identifier, in the form of owner/id :type project_key: str :returns: Project definition, with all attributes :rtype: dict :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> intro_project = api_client.get_project( ... 'jonloyens/' ... 'an-example-project-that-shows-what-to-put-in-data-world' ... ) # doctest: +SKIP >>> intro_project['title'] # doctest: +SKIP 'An Example Project that Shows What To Put in data.world'
5.097481
6.354125
0.802232
request = self.__build_project_obj( lambda: _swagger.ProjectCreateRequest( title=kwargs.get('title'), visibility=kwargs.get('visibility') ), lambda name, url, description, labels: _swagger.FileCreateRequest( name=name, source=_swagger.FileSourceCreateRequest(url=url), description=description, labels=labels), kwargs) try: (_, _, headers) = self._projects_api.create_project_with_http_info( owner_id, body=request, _return_http_data_only=False) if 'Location' in headers: return headers['Location'] except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def create_project(self, owner_id, **kwargs)
Create a new project :param owner_id: Username of the creator of a project. :type owner_id: str :param title: Project title (will be used to generate project id on creation) :type title: str :param objective: Short project objective. :type objective: str, optional :param summary: Long-form project summary. :type summary: str, optional :param tags: Project tags. Letters numbers and spaces :type tags: list, optional :param license: Project license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Project visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :param linked_datasets: Initial set of linked datasets. :type linked_datasets: list of object, optional :returns: Newly created project key :rtype: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.create_project( ... 'username', title='project testing', ... visibility='PRIVATE', ... linked_datasets=[{'owner': 'someuser', ... 'id': 'somedataset'}]) # doctest: +SKIP
4.246566
4.403484
0.964365
request = self.__build_project_obj( lambda: _swagger.ProjectPatchRequest(), lambda name, url, description, labels: _swagger.FileCreateOrUpdateRequest( name=name, source=_swagger.FileSourceCreateOrUpdateRequest(url=url), description=description, labels=labels), kwargs) owner_id, project_id = parse_dataset_key(project_key) try: return self._projects_api.patch_project(owner_id, project_id, body=request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def update_project(self, project_key, **kwargs)
Update an existing project :param project_key: Username and unique identifier of the creator of a project in the form of owner/id. :type project_key: str :param title: Project title :type title: str :param objective: Short project objective. :type objective: str, optional :param summary: Long-form project summary. :type summary: str, optional :param tags: Project tags. Letters numbers and spaces :type tags: list, optional :param license: Project license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Project visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :param linked_datasets: Initial set of linked datasets. :type linked_datasets: list of object, optional :returns: message object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_project( ... 'username/test-project', ... tags=['demo', 'datadotworld']) # doctest: +SKIP
5.127034
5.385196
0.952061
request = self.__build_project_obj( lambda: _swagger.ProjectCreateRequest( title=kwargs.get('title'), visibility=kwargs.get('visibility') ), lambda name, url, description, labels: _swagger.FileCreateRequest( name=name, source=_swagger.FileSourceCreateRequest(url=url), description=description, labels=labels), kwargs) try: project_owner_id, project_id = parse_dataset_key(project_key) self._projects_api.replace_project(project_owner_id, project_id, body=request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def replace_project(self, project_key, **kwargs)
Replace an existing Project *Create a project with a given id or completely rewrite the project, including any previously added files or linked datasets, if one already exists with the given id.* :param project_key: Username and unique identifier of the creator of a project in the form of owner/id. :type project_key: str :param title: Project title :type title: str :param objective: Short project objective. :type objective: str, optional :param summary: Long-form project summary. :type summary: str, optional :param tags: Project tags. Letters numbers and spaces :type tags: list, optional :param license: Project license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Project visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :param linked_datasets: Initial set of linked datasets. :type linked_datasets: list of object, optional :returns: project object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.replace_project( ... 'username/test-project', ... visibility='PRIVATE', ... objective='A better objective', ... title='Replace project') # doctest: +SKIP
4.844431
4.786182
1.01217
try: project_owner_id, project_id = parse_dataset_key(project_key) dataset_owner_id, dataset_id = parse_dataset_key(dataset_key) self._projects_api.add_linked_dataset(project_owner_id, project_id, dataset_owner_id, dataset_id) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def add_linked_dataset(self, project_key, dataset_key)
Link project to an existing dataset This method links a dataset to project :param project_key: Project identifier, in the form of owner/id :type project_key: str :param dataset_key: Dataset identifier, in the form of owner/id :type project_key: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> linked_dataset = api_client.add_linked_dataset( ... 'username/test-project', ... 'username/test-dataset') # doctest: +SKIP
2.977828
3.216328
0.925847
try: project_owner, project_id = parse_dataset_key(project_key) return self._insights_api.get_insight(project_owner, project_id, insight_id, **kwargs).to_dict() except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def get_insight(self, project_key, insight_id, **kwargs)
Retrieve an insight :param project_key: Project identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :returns: Insight definition, with all attributes :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> insight = api_client.get_insight( ... 'jonloyens/' ... 'an-example-project-that-shows-what-to-put-in-data-world', ... 'c2538b0c-c200-474c-9631-5ff4f13026eb') # doctest: +SKIP >>> insight['title'] # doctest: +SKIP 'Coast Guard Lives Saved by Fiscal Year'
4.558425
5.143166
0.886307
try: project_owner, project_id = parse_dataset_key(project_key) return self._insights_api.get_insights_for_project(project_owner, project_id, **kwargs) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def get_insights_for_project(self, project_key, **kwargs)
Get insights for a project. :param project_key: Project identifier, in the form of projectOwner/projectid :type project_key: str :returns: Insight results :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> insights = api_client.get_insights_for_project( ... 'jonloyens/' ... 'an-example-project-that-shows-what-to-put-in-data-world' ... ) # doctest: +SKIP
4.55457
4.967386
0.916895
request = self.__build_insight_obj( lambda: _swagger.InsightCreateRequest( title=kwargs.get('title'), body=_swagger.InsightBody( image_url=kwargs.get('image_url'), embed_url=kwargs.get('embed_url'), markdown_body=kwargs.get('markdown_body') ) ), kwargs) project_owner, project_id = parse_dataset_key(project_key) try: (_, _, headers) = self._insights_api.create_insight_with_http_info( project_owner, project_id, body=request, _return_http_data_only=False) if 'Location' in headers: return headers['Location'] except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def create_insight(self, project_key, **kwargs)
Create a new insight :param project_key: Project identifier, in the form of projectOwner/projectid :type project_key: str :param title: Insight title :type title: str :param description: Insight description. :type description: str, optional :param image_url: If image-based, the URL of the image :type image_url: str :param embed_url: If embed-based, the embeddable URL :type embed_url: str :param source_link: Permalink to source code or platform this insight was generated with. Allows others to replicate the steps originally used to produce the insight. :type source_link: str, optional :param data_source_links: One or more permalinks to the data sources used to generate this insight. Allows others to access the data originally used to produce the insight. :type data_source_links: array :returns: Insight with message and uri object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.create_insight( ... 'projectOwner/projectid', title='Test insight', ... image_url='url') # doctest: +SKIP
3.760427
3.80637
0.98793
request = self.__build_insight_obj( lambda: _swagger.InsightPutRequest( title=kwargs.get('title'), body=_swagger.InsightBody( image_url=kwargs.get('image_url'), embed_url=kwargs.get('embed_url'), markdown_body=kwargs.get('markdown_body') ) ), kwargs) project_owner, project_id = parse_dataset_key(project_key) try: self._insights_api.replace_insight(project_owner, project_id, insight_id, body=request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def replace_insight(self, project_key, insight_id, **kwargs)
Replace an insight. :param project_key: Projrct identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :param title: Insight title :type title: str :param description: Insight description. :type description: str, optional :param image_url: If image-based, the URL of the image :type image_url: str :param embed_url: If embed-based, the embeddable URL :type embed_url: str :param source_link: Permalink to source code or platform this insight was generated with. Allows others to replicate the steps originally used to produce the insight. :type source_link: str, optional :param data_source_links: One or more permalinks to the data sources used to generate this insight. Allows others to access the data originally used to produce the insight. :type data_source_links: array :returns: message object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.replace_insight( ... 'projectOwner/projectid', ... '1230-9324-3424242442', ... embed_url='url', ... title='Test insight') # doctest: +SKIP
4.224493
4.330254
0.975576
request = self.__build_insight_obj( lambda: _swagger.InsightPatchRequest(), kwargs) project_owner, project_id = parse_dataset_key(project_key) try: self._insights_api.update_insight(project_owner, project_id, insight_id, body=request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def update_insight(self, project_key, insight_id, **kwargs)
Update an insight. **Note that only elements included in the request will be updated. All omitted elements will remain untouched. :param project_key: Projrct identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :param title: Insight title :type title: str :param description: Insight description. :type description: str, optional :param image_url: If image-based, the URL of the image :type image_url: str :param embed_url: If embed-based, the embeddable URL :type embed_url: str :param source_link: Permalink to source code or platform this insight was generated with. Allows others to replicate the steps originally used to produce the insight. :type source_link: str, optional :param data_source_links: One or more permalinks to the data sources used to generate this insight. Allows others to access the data originally used to produce the insight. :type data_source_links: array :returns: message object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_insight( ... 'username/test-project', 'insightid' ... title='demo atadotworld'}) # doctest: +SKIP
6.535026
7.277898
0.897928
projectOwner, projectId = parse_dataset_key(project_key) try: self._insights_api.delete_insight(projectOwner, projectId, insight_id) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def delete_insight(self, project_key, insight_id)
Delete an existing insight. :params project_key: Project identifier, in the form of projectOwner/projectId :type project_key: str :params insight_id: Insight unique id :type insight_id: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> del_insight = api_client.delete_insight( ... 'username/project', 'insightid') # doctest: +SKIP
6.504088
7.033459
0.924735
match = re.match(DATASET_KEY_PATTERN, dataset_key) if not match: raise ValueError('Invalid dataset key. Key must include user and ' 'dataset names, separated by (i.e. user/dataset).') return match.groups()
def parse_dataset_key(dataset_key)
Parse a dataset URL or path and return the owner and the dataset id :param dataset_key: Dataset key (in the form of owner/id) or dataset URL :type dataset_key: str :returns: User name of the dataset owner and ID of the dataset :rtype: dataset_owner, dataset_id :raises ValueError: If the provided key does comply to the expected pattern Examples -------- >>> from datadotworld import util >>> util.parse_dataset_key( ... 'https://data.world/jonloyens/an-intro-to-datadotworld-dataset') ('jonloyens', 'an-intro-to-datadotworld-dataset') >>> util.parse_dataset_key('jonloyens/an-intro-to-datadotworld-dataset') ('jonloyens', 'an-intro-to-datadotworld-dataset')
4.38184
5.525053
0.793086
return cls({k: LazyLoadedValue( lambda k=k: loader_func(k), type_hint=type_hint) for k in keys})
def from_keys(cls, keys, loader_func, type_hint=None)
Factory method for `LazyLoadedDict` Accepts a ``loader_func`` that is to be applied to all ``keys``. :param keys: List of keys to create the dictionary with :type keys: iterable :param loader_func: Function to be applied to all keys :type loader_func: function :param type_hint: Expected type of lazy loaded values. Used by `LazyLoadedValue`. (Default value = None) :type type_hint: str :returns: A properly constructed lazy loaded dictionary :rtype: LazyLoadedDict
5.491101
4.328727
1.268526
if name is None: raise ValueError("Invalid value for `name`, must not be `None`") if name is not None and len(name) > 128: raise ValueError("Invalid value for `name`, length must be less than or equal to `128`") if name is not None and len(name) < 1: raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") if name is not None and not re.search('^[^\/]+$', name): raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/^[^\/]+$/`") self._name = name
def name(self, name)
Sets the name of this FileCreateOrUpdateRequest. File name. Should include type extension always when possible. Must not include slashes. :param name: The name of this FileCreateOrUpdateRequest. :type: str
1.471921
1.387412
1.060912
allowed_values = ["raw data", "documentation", "visualization", "clean data", "script", "report"] if not set(labels).issubset(set(allowed_values)): raise ValueError( "Invalid values for `labels` [{0}], must be a subset of [{1}]" .format(", ".join(map(str, set(labels)-set(allowed_values))), ", ".join(map(str, allowed_values))) ) self._labels = labels
def labels(self, labels)
Sets the labels of this FileCreateOrUpdateRequest. File labels. :param labels: The labels of this FileCreateOrUpdateRequest. :type: list[str]
3.039821
2.494991
1.218369
if user is None: raise ValueError("Invalid value for `user`, must not be `None`") if user is not None and len(user) > 1024: raise ValueError("Invalid value for `user`, length must be less than or equal to `1024`") self._user = user
def user(self, user)
Sets the user of this WebCredentials. The name of the account to login to. :param user: The user of this WebCredentials. :type: str
1.842158
1.872977
0.983545
if password is not None and len(password) > 1024: raise ValueError("Invalid value for `password`, length must be less than or equal to `1024`") self._password = password
def password(self, password)
Sets the password of this WebCredentials. The secret password. This field is write-only. It is omitted by read operations. If authorization is required, the `password` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `password` field--the update will preserve the previous value. :param password: The password of this WebCredentials. :type: str
2.422413
2.710828
0.893606
if owner is None: raise ValueError("Invalid value for `owner`, must not be `None`") if owner is not None and len(owner) > 31: raise ValueError("Invalid value for `owner`, length must be less than or equal to `31`") if owner is not None and len(owner) < 3: raise ValueError("Invalid value for `owner`, length must be greater than or equal to `3`") if owner is not None and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', owner): raise ValueError("Invalid value for `owner`, must be a follow pattern or equal to `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`") self._owner = owner
def owner(self, owner)
Sets the owner of this OauthTokenReference. User name of the owner of the OAuth token within data.world. :param owner: The owner of this OauthTokenReference. :type: str
1.500206
1.480466
1.013334
if site is None: raise ValueError("Invalid value for `site`, must not be `None`") if site is not None and len(site) > 255: raise ValueError("Invalid value for `site`, length must be less than or equal to `255`") if site is not None and len(site) < 3: raise ValueError("Invalid value for `site`, length must be greater than or equal to `3`") if site is not None and not re.search('(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\\.)+[a-z]{2,}(?:@[a-z0-9](?:[-.](?=[a-z0-9])|[a-z0-9]){0,29})?', site): raise ValueError("Invalid value for `site`, must be a follow pattern or equal to `/(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\\.)+[a-z]{2,}(?:@[a-z0-9](?:[-.](?=[a-z0-9])|[a-z0-9]){0,29})?/`") self._site = site
def site(self, site)
Sets the site of this OauthTokenReference. :param site: The site of this OauthTokenReference. :type: str
1.601777
1.559635
1.02702
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.sql_get_with_http_info(owner, id, query, **kwargs) else: (data) = self.sql_get_with_http_info(owner, id, query, **kwargs) return data
def sql_get(self, owner, id, query, **kwargs)
SQL query (via GET) This endpoint executes SQL queries against a dataset. SQL results are available in a variety of formats. By default, `application/json` will be returned. Set the `Accept` header to one of the following values in accordance with your preference: * `text/csv` * `application/json` * `application/json-l` * `application/x-ndjson` New to SQL? Check out data.world's [SQL manual](https://docs.data.world/tutorials/dwsql/) . This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sql_get(owner, id, query, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str query: (required) :param bool include_table_schema: Flags indicating to include table schema in the response. :return: None If the method is called asynchronously, returns the request thread.
1.52894
1.549823
0.986525
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.sql_post_with_http_info(owner, id, query, **kwargs) else: (data) = self.sql_post_with_http_info(owner, id, query, **kwargs) return data
def sql_post(self, owner, id, query, **kwargs)
SQL query This endpoint executes SQL queries against a dataset. SQL results are available in a variety of formats. By default, `application/json` will be returned. Set the `Accept` header to one of the following values in accordance with your preference: * `text/csv` * `application/json` * `application/json-l` * `application/x-ndjson` New to SQL? Check out data.world's [SQL manual](https://docs.data.world/tutorials/dwsql/) . This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sql_post(owner, id, query, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str query: (required) :param bool include_table_schema: Flags indicating to include table schema in the response. :return: None If the method is called asynchronously, returns the request thread.
1.489161
1.551659
0.959722
if url is None: raise ValueError("Invalid value for `url`, must not be `None`") if url is not None and len(url) > 4096: raise ValueError("Invalid value for `url`, length must be less than or equal to `4096`") if url is not None and len(url) < 1: raise ValueError("Invalid value for `url`, length must be greater than or equal to `1`") if url is not None and not re.search('^https?:.*', url): raise ValueError("Invalid value for `url`, must be a follow pattern or equal to `/^https?:.*/`") self._url = url
def url(self, url)
Sets the url of this FileSourceCreateRequest. Source URL of file. Must be an http, https. :param url: The url of this FileSourceCreateRequest. :type: str
1.600459
1.527159
1.047998
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.upload_file_with_http_info(owner, id, name, **kwargs) else: (data) = self.upload_file_with_http_info(owner, id, name, **kwargs) return data
def upload_file(self, owner, id, name, **kwargs)
Upload file Upload one file at a time to a dataset. This endpoint expects requests of type `application/octet-stream`. For example, assuming that you want to upload a local file named `file1.csv` to a hypothetical dataset `https://data.world/awesome-user/awesome-dataset` and choose its name on data.world to be `better-name.csv`, this is what the cURL command would look like. ```bash curl \\ -H \"Authorization: Bearer <YOUR_API_TOKEN>\" \\ -X PUT -H \"Content-Type: application/octet-stream\" \\ --data-binary @file1.csv \\ https://api.data.world/v0/uploads/awesome-user/awesome-dataset/files/better-name.csv ``` This method of upload is typically not supported by Swagger clients. Other HTTP clients can be used to supply the contents of the file directly in the body of the request. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.upload_file(owner, id, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str name: File name and unique identifier within dataset. (required) :param bool expand_archive: Indicates whether a compressed file should be expanded upon upload. :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.416167
1.495496
0.946954
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.upload_files_with_http_info(owner, id, file, **kwargs) else: (data) = self.upload_files_with_http_info(owner, id, file, **kwargs) return data
def upload_files(self, owner, id, file, **kwargs)
Upload files Upload multiple files at once to a dataset via multipart request. This endpoint expects requests of type `multipart/form-data` and you can include one or more parts named `file`, each containing a different file to be uploaded. For example, assuming that, you want to upload two local files named `file1.csv` and `file2.csv` to a hypothetical dataset `https://data.world/awesome-user/awesome-dataset`, this is what the cURL command would look like. ```bash curl \\ -H \"Authorization: Bearer <YOUR_API_TOKEN>\" \\ -F \"file=@file1.csv\" \\ -F \"file=@file2.csv\" \\ https://api.data.world/v0/uploads/awesome-user/awesome-dataset/files ``` Swagger clients will limit this method of upload to one file at a time. Other HTTP clients capable of making multipart/form-data requests can be used to upload multiple files in a single request. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.upload_files(owner, id, file, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param file file: Multipart-encoded file contents (required) :param bool expand_archives: Indicates whether compressed files should be expanded upon upload. :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.423666
1.502451
0.947562
# TODO Move network request to RestApiClient owner_id, dataset_id = parse_dataset_key(dataset_key) params = { "query": query } if parameters and query_type == "sparql": # if SPARQL, then the parameters should be a Mapping containing # named parameters params["parameters"] = ",".join( ["{}={}".format(k, convert_to_sparql_literal(parameters[k])) for k in parameters.keys()]) elif parameters and query_type == "sql": # if SQL, then the parameters should be an array with positional # parameters, need to unwind them to $data_world_paramN for each # 0-indexed position N parameters = {"$data_world_param{}".format(i): x for i, x in enumerate(parameters)} params["parameters"] = ",".join(["{}={}".format( k, convert_to_sparql_literal(parameters[k])) for k in parameters.keys()]) url = "{0}://{1}/{2}/{3}/{4}".format(self._protocol, self._query_host, query_type, owner_id, dataset_id) headers = { 'User-Agent': _user_agent(), 'Accept': 'application/sparql-results+json', 'Authorization': 'Bearer {0}'.format(self._config.auth_token) } response = requests.get(url, params=params, headers=headers) if response.status_code == 200: return QueryResults(response.json()) raise RuntimeError( 'Error executing query: {}'.format(response.content))
def query(self, dataset_key, query, query_type="sql", parameters=None)
Query an existing dataset :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param query: SQL or SPARQL query :type query: str :param query_type: The type of the query. Must be either 'sql' or 'sparql'. (Default value = "sql") :type query_type: {'sql', 'sparql'}, optional :param parameters: parameters to the query - if SPARQL query, this should be a dict containing named parameters, if SQL query,then this should be a list containing positional parameters. Boolean values will be converted to xsd:boolean, Integer values to xsd:integer, and other Numeric values to xsd:decimal. Anything else is treated as a String literal (Default value = None) :type parameters: query parameters, optional :returns: Object containing the results of the query :rtype: Results :raises RuntimeError: If a server error occurs
3.391372
3.14238
1.079237
owner_id, dataset_id = parse_dataset_key(dataset_key) cache_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'latest') backup_dir = None if path.isdir(cache_dir) and force_update: backup_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'backup') move_cache_dir_to_backup_dir(backup_dir, cache_dir) descriptor_file = path.join(cache_dir, 'datapackage.json') if not path.isfile(descriptor_file): try: descriptor_file = self.api_client.download_datapackage( dataset_key, cache_dir) except RestApiError as e: if backup_dir is not None: shutil.move(backup_dir, cache_dir) warn('Unable to download datapackage ({}). ' 'Loading previously saved version.'.format(e.reason)) else: raise else: try: dataset_info = self.api_client.get_dataset(dataset_key) except RestApiError as e: return LocalDataset(descriptor_file) last_modified = datetime.strptime(dataset_info['updated'], '%Y-%m-%dT%H:%M:%S.%fZ') if (last_modified > datetime.utcfromtimestamp( path.getmtime(str(descriptor_file)))): if auto_update: try: backup_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'backup') move_cache_dir_to_backup_dir(backup_dir, cache_dir) descriptor_file = self.api_client. \ download_datapackage(dataset_key, cache_dir) except RestApiError as e: if backup_dir is not None: shutil.move(backup_dir, cache_dir) warn('Unable to auto update datapackage ({}). ' 'Loading previously saved version.' .format(e.reason)) else: raise else: filterwarnings('always', message='You are using an outdated copy') warn('You are using an outdated copy of {}. ' 'If you wish to use the latest version, call this ' 'function with the argument ' 'auto_update=True or ' 'force_update=True'.format(dataset_key)) if backup_dir is not None: shutil.rmtree(backup_dir, ignore_errors=True) return LocalDataset(descriptor_file)
def load_dataset(self, dataset_key, force_update=False, auto_update=False)
Load a dataset from the local filesystem, downloading it from data.world first, if necessary. This function returns an object of type `LocalDataset`. The object allows access to metedata via it's `describe()` method and to all the data via three properties `raw_data`, `tables` and `dataframes`, all of which are mappings (dict-like structures). :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param force_update: Flag, indicating if a new copy of the dataset should be downloaded replacing any previously downloaded copy (Default value = False) :type force_update: bool :param auto_update: Flag, indicating that dataset be updated to the latest version :type auto_update: bool :returns: The object representing the dataset :rtype: LocalDataset :raises RestApiError: If a server error occurs
2.360804
2.283165
1.034005
try: return RemoteFile(self._config, dataset_key, file_name, mode=mode, **kwargs) except Exception as e: raise RestApiError(cause=e)
def open_remote_file(self, dataset_key, file_name, mode='w', **kwargs)
Open a remote file object that can be used to write to or read from a file in a data.world dataset :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :param file_name: The name of the file to open :type file_name: str :param mode: the mode for the file - must be 'w', 'wb', 'r', or 'rb' - indicating read/write ('r'/'w') and optionally "binary" handling of the file data. (Default value = 'w') :type mode: str, optional :param chunk_size: size of chunked bytes to return when reading streamed bytes in 'rb' mode :type chunk_size: int, optional :param decode_unicode: whether to decode textual responses as unicode when returning streamed lines in 'r' mode :type decode_unicode: bool, optional :param **kwargs: Examples -------- >>> import datadotworld as dw >>> >>> # write a text file >>> with dw.open_remote_file('username/test-dataset', ... 'test.txt') as w: ... w.write("this is a test.") >>> >>> # write a jsonlines file >>> import json >>> with dw.open_remote_file('username/test-dataset', ... 'test.jsonl') as w: ... json.dump({'foo':42, 'bar':"A"}, w) ... w.write("\\n") ... json.dump({'foo':13, 'bar':"B"}, w) ... w.write("\\n") >>> >>> # write a csv file >>> import csv >>> with dw.open_remote_file('username/test-dataset', ... 'test.csv') as w: ... csvw = csv.DictWriter(w, fieldnames=['foo', 'bar']) ... csvw.writeheader() ... csvw.writerow({'foo':42, 'bar':"A"}) ... csvw.writerow({'foo':13, 'bar':"B"}) >>> >>> # write a pandas dataframe as a csv file >>> import pandas as pd >>> df = pd.DataFrame({'foo':[1,2,3,4],'bar':['a','b','c','d']}) >>> with dw.open_remote_file('username/test-dataset', ... 'dataframe.csv') as w: ... df.to_csv(w, index=False) >>> >>> # write a binary file >>> with dw.open_remote_file('username/test-dataset', >>> 'test.txt', mode='wb') as w: ... w.write(bytes([100,97,116,97,46,119,111,114,108,100])) >>> >>> # read a text file >>> with dw.open_remote_file('username/test-dataset', ... 'test.txt', mode='r') as r: ... print(r.read()) >>> >>> # read a csv file >>> with dw.open_remote_file('username/test-dataset', ... 'test.csv', mode='r') as r: ... csvr = csv.DictReader(r) ... for row in csvr: ... print(row['column a'], row['column b']) >>> >>> # read a binary file >>> with dw.open_remote_file('username/test-dataset', ... 'test', mode='rb') as r: ... bytes = r.read()
4.277523
9.499159
0.450305
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.add_files_by_source_with_http_info(owner, id, body, **kwargs) else: (data) = self.add_files_by_source_with_http_info(owner, id, body, **kwargs) return data
def add_files_by_source(self, owner, id, body, **kwargs)
Add files This method allows files published on the web to be added to a data.world dataset via their URL. The source URL will be stored so you can easily update your file anytime it changes via the *fetch latest* link on the [data.world](https://data.world/) dataset page or by triggering the GET:/sync endpoint. Check-out or tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.add_files_by_source(owner, id, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param FileBatchUpdateRequest body: (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.361906
1.514776
0.899081
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_dataset_with_http_info(owner, body, **kwargs) else: (data) = self.create_dataset_with_http_info(owner, body, **kwargs) return data
def create_dataset(self, owner, body, **kwargs)
Create a dataset Create a new dataset. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_dataset(owner, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param DatasetCreateRequest body: (required) :return: CreateDatasetResponse If the method is called asynchronously, returns the request thread.
1.464222
1.593515
0.918863
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_dataset_with_http_info(owner, id, **kwargs) else: (data) = self.delete_dataset_with_http_info(owner, id, **kwargs) return data
def delete_dataset(self, owner, id, **kwargs)
Delete a dataset Permanently deletes a dataset and all data associated with it. This operation cannot be undone, although a new dataset may be created with the same id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_dataset(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.475277
1.548219
0.952887
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_file_and_sync_source_with_http_info(owner, id, name, **kwargs) else: (data) = self.delete_file_and_sync_source_with_http_info(owner, id, name, **kwargs) return data
def delete_file_and_sync_source(self, owner, id, name, **kwargs)
Delete a file Delete a single file from a dataset by their name, including files added via URL. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_file_and_sync_source(owner, id, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str name: Name and unique idenfiier of file within the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.358853
1.448507
0.938106
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_files_and_sync_sources_with_http_info(owner, id, name, **kwargs) else: (data) = self.delete_files_and_sync_sources_with_http_info(owner, id, name, **kwargs) return data
def delete_files_and_sync_sources(self, owner, id, name, **kwargs)
Delete files Delete one or more files from a dataset by their name, including files added via URL. **Batching** Note that the `name` parameter can be include multiple times in the query string, once for each file that is to be deleted together in a single request. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_files_and_sync_sources(owner, id, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param list[str] name: Names of files to be deleted. Multiple can be provided in a single request by repeating the query string parameter name as many times as necessary. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.354786
1.434684
0.94431
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_dataset_with_http_info(owner, id, **kwargs) else: (data) = self.get_dataset_with_http_info(owner, id, **kwargs) return data
def get_dataset(self, owner, id, **kwargs)
Retrieve a dataset Return details on the dataset. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_dataset(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: DatasetSummaryResponse If the method is called asynchronously, returns the request thread.
1.503029
1.562272
0.962079
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.patch_dataset_with_http_info(owner, id, body, **kwargs) else: (data) = self.patch_dataset_with_http_info(owner, id, body, **kwargs) return data
def patch_dataset(self, owner, id, body, **kwargs)
Update a dataset Update an existing dataset. Note that only elements or files included in the request will be updated. All omitted elements or files will remain untouched. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.patch_dataset(owner, id, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param DatasetPatchRequest body: (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.451619
1.570504
0.924301
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.replace_dataset_with_http_info(owner, id, body, **kwargs) else: (data) = self.replace_dataset_with_http_info(owner, id, body, **kwargs) return data
def replace_dataset(self, owner, id, body, **kwargs)
Create / Replace a dataset Create a dataset with a given id or completely rewrite the dataset, including any previously added files, if one already exists with the given id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.replace_dataset(owner, id, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param DatasetPutRequest body: (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.431065
1.565156
0.914327
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.sync_with_http_info(owner, id, **kwargs) else: (data) = self.sync_with_http_info(owner, id, **kwargs) return data
def sync(self, owner, id, **kwargs)
Sync files Update all files within a dataset that have originally been added via URL (e.g. via /datasets endpoints or on data.world). Check-out or tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sync(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.502432
1.586076
0.947263
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.sync_via_get_with_http_info(owner, id, **kwargs) else: (data) = self.sync_via_get_with_http_info(owner, id, **kwargs) return data
def sync_via_get(self, owner, id, **kwargs)
Sync files (via GET) Update all files within a dataset that have originally been added via URL (e.g. via /datasets endpoints or on data.world). Check-out or tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sync_via_get(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
1.391651
1.514391
0.918951
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.fetch_contributing_datasets_with_http_info(**kwargs) else: (data) = self.fetch_contributing_datasets_with_http_info(**kwargs) return data
def fetch_contributing_datasets(self, **kwargs)
List datasets as contributor Fetch datasets that the currently authenticated user has access to because he or she is a contributor. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.fetch_contributing_datasets(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str limit: Maximum number of items to include in a page of results. :param str next: Token from previous result page to be used when requesting a subsequent page. :return: PaginatedDatasetResults If the method is called asynchronously, returns the request thread.
1.659099
1.620619
1.023744
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.fetch_contributing_projects_with_http_info(**kwargs) else: (data) = self.fetch_contributing_projects_with_http_info(**kwargs) return data
def fetch_contributing_projects(self, **kwargs)
List projects as contributor Fetch projects that the currently authenticated user has access to because he or she is a contributor. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.fetch_contributing_projects(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :return: PaginatedProjectResults If the method is called asynchronously, returns the request thread.
1.646294
1.645683
1.000372
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.fetch_datasets_with_http_info(**kwargs) else: (data) = self.fetch_datasets_with_http_info(**kwargs) return data
def fetch_datasets(self, **kwargs)
List datasets as owner Fetch datasets that the currently authenticated user has access to because he or she is the owner of the dataset. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.fetch_datasets(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str limit: Maximum number of items to include in a page of results. :param str next: Token from previous result page to be used when requesting a subsequent page. :return: PaginatedDatasetResults If the method is called asynchronously, returns the request thread.
1.68002
1.675781
1.00253
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.fetch_liked_datasets_with_http_info(**kwargs) else: (data) = self.fetch_liked_datasets_with_http_info(**kwargs) return data
def fetch_liked_datasets(self, **kwargs)
List liked datasets Fetch datasets that the currently authenticated user likes. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.fetch_liked_datasets(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str limit: Maximum number of items to include in a page of results. :param str next: Token from previous result page to be used when requesting a subsequent page. :return: PaginatedDatasetResults If the method is called asynchronously, returns the request thread.
1.619271
1.596374
1.014343
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.fetch_liked_projects_with_http_info(**kwargs) else: (data) = self.fetch_liked_projects_with_http_info(**kwargs) return data
def fetch_liked_projects(self, **kwargs)
List liked projects Fetch projects that the currently authenticated user likes. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.fetch_liked_projects(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :return: PaginatedProjectResults If the method is called asynchronously, returns the request thread.
1.592219
1.638024
0.972037
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.fetch_projects_with_http_info(**kwargs) else: (data) = self.fetch_projects_with_http_info(**kwargs) return data
def fetch_projects(self, **kwargs)
List projects owned Fetch projects that the currently authenticated user has access to because he or she is the owner of the project. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.fetch_projects(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :return: PaginatedProjectResults If the method is called asynchronously, returns the request thread.
1.622496
1.723356
0.941475
kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_user_data_with_http_info(**kwargs) else: (data) = self.get_user_data_with_http_info(**kwargs) return data
def get_user_data(self, **kwargs)
Get user data Return profile information for the currently authenticated user. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_user_data(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :return: UserDataResponse If the method is called asynchronously, returns the request thread.
1.561565
1.638944
0.952787