| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q6600
|
list_vmss_vm_instance_view_pg
|
train
|
def list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group, vmss_name,
link=None):
'''Gets one page of a paginated list of scale set VM instance views.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
link (str): Optional link to URI to get list (as part of a paginated API query).
Returns:
HTTP response. JSON body of list of VM instance views.
'''
if link is None:
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'/virtualMachines?$expand=instanceView&$select=instanceView',
'&api-version=', COMP_API])
else:
endpoint = link
return do_get(endpoint, access_token)
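
# Illustrative sketch (not part of the original module): drain the paginated listing by
# following the 'nextLink' field that ARM list responses carry. The '_example_' helper and
# the assumption that the response is the parsed JSON dict are mine.
def _example_list_all_vmss_vm_instance_views(access_token, subscription_id, resource_group,
                                             vmss_name):
    views = []
    page = list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group,
                                         vmss_name)
    while True:
        views.extend(page.get('value', []))
        if 'nextLink' not in page:
            return views
        page = list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group,
                                             vmss_name, link=page['nextLink'])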
|
python
|
{
"resource": ""
}
|
q6601
|
poweroff_vmss
|
train
|
def poweroff_vmss(access_token, subscription_id, resource_group, vmss_name):
'''Power off all the VMs in a virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'/powerOff?api-version=', COMP_API])
body = '{"instanceIds" : ["*"]}'
return do_post(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6602
|
poweroff_vmss_vms
|
train
|
def poweroff_vmss_vms(access_token, subscription_id, resource_group, vmss_name, instance_ids):
    '''Power off the specified VMs in a virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
instance_ids (str): String representation of a JSON list of VM IDs. E.g. '[1,2]'.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'/powerOff?api-version=', COMP_API])
body = '{"instanceIds" : ' + instance_ids + '}'
return do_post(endpoint, body, access_token)
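
# Illustrative sketch (not part of the original module): instance_ids must already be a
# JSON-formatted string, so build it with json.dumps() from a Python list (matching the
# docstring's '[1,2]' example). The instance IDs here are placeholders.
def _example_poweroff_first_two_instances(access_token, subscription_id, resource_group,
                                          vmss_name):
    instance_ids = json.dumps([1, 2])
    return poweroff_vmss_vms(access_token, subscription_id, resource_group, vmss_name,
                             instance_ids)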
|
python
|
{
"resource": ""
}
|
q6603
|
put_vmss
|
train
|
def put_vmss(access_token, subscription_id, resource_group, vmss_name, vmss_body):
'''Put VMSS body.
Can be used to create or update a scale set.
E.g. call get_vmss(), make changes to the body, call put_vmss().
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the new scale set.
        vmss_body (dict): Body containing the scale set definition to put.
Returns:
HTTP response. JSON body of the virtual machine scale set properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
body = json.dumps(vmss_body)
return do_put(endpoint, body, access_token)
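
# Illustrative sketch (not part of the original module) of the get/modify/put workflow the
# docstring describes. get_vmss() is part of this library but not shown here, and the
# assumption that the body exposes sku.capacity follows the ARM scale set schema.
def _example_bump_vmss_capacity(access_token, subscription_id, resource_group, vmss_name):
    vmss = get_vmss(access_token, subscription_id, resource_group, vmss_name)
    vmss['sku']['capacity'] = int(vmss['sku']['capacity']) + 1
    return put_vmss(access_token, subscription_id, resource_group, vmss_name, vmss)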
|
python
|
{
"resource": ""
}
|
q6604
|
scale_vmss
|
train
|
def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity):
'''Change the instance count of an existing VM Scale Set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
capacity (int): New number of VMs.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
body = '{"sku":{"capacity":"' + str(capacity) + '"}}'
return do_patch(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6605
|
start_vm
|
train
|
def start_vm(access_token, subscription_id, resource_group, vm_name):
'''Start a virtual machine.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vm_name (str): Name of the virtual machine.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachines/',
vm_name,
'/start',
'?api-version=', COMP_API])
return do_post(endpoint, '', access_token)
|
python
|
{
"resource": ""
}
|
q6606
|
update_vm
|
train
|
def update_vm(access_token, subscription_id, resource_group, vm_name, body):
'''Update a virtual machine with a new JSON body. E.g. do a GET, change something, call this.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vm_name (str): Name of the virtual machine.
body (dict): JSON body of the VM.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachines/', vm_name,
'?api-version=', COMP_API])
return do_put(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6607
|
update_vmss
|
train
|
def update_vmss(access_token, subscription_id, resource_group, vmss_name, body):
'''Update a VMSS with a new JSON body. E.g. do a GET, change something, call this.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
        vmss_name (str): Name of the virtual machine scale set.
body (dict): JSON body of the VM scale set.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
return do_put(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6608
|
get_vm_ids_by_ud
|
train
|
def get_vm_ids_by_ud(access_token, subscription_id, resource_group, vmssname, updatedomain):
    '''Look at the VMSS VM instance view to get VM IDs by update domain (UD).'''
instance_viewlist = azurerm.list_vmss_vm_instance_view(access_token, subscription_id,
resource_group, vmssname)
# print(json.dumps(instance_viewlist, sort_keys=False, indent=2, separators=(',', ': ')))
# loop through the instance view list, and build the vm id list of VMs in
# the matching UD
udinstancelist = []
for instance_view in instance_viewlist['value']:
        vmud = instance_view['properties']['instanceView']['platformUpdateDomain']
if vmud == updatedomain:
udinstancelist.append(instance_view['instanceId'])
udinstancelist.sort()
return udinstancelist
|
python
|
{
"resource": ""
}
|
q6609
|
get_graph_token_from_msi
|
train
|
def get_graph_token_from_msi():
'''get a Microsoft Graph access token using Azure Cloud Shell's MSI_ENDPOINT.
Notes:
The auth token returned by this function is not an Azure auth token. Use it for querying
the Microsoft Graph API.
This function only works in an Azure cloud shell or virtual machine.
Returns:
A Microsoft Graph authentication token string.
'''
if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ:
endpoint = os.environ['MSI_ENDPOINT']
else:
return None
headers = {'Metadata': 'true'}
body = {"resource": 'https://' + GRAPH_RESOURCE_HOST + '/'}
ret = requests.post(endpoint, headers=headers, data=body)
return ret.json()['access_token']
|
python
|
{
"resource": ""
}
|
q6610
|
get_object_id_from_graph
|
train
|
def get_object_id_from_graph(access_token=None):
'''Return the object ID for the Graph user who owns the access token.
Args:
access_token (str): A Microsoft Graph access token. (Not an Azure access token.)
If not provided, attempt to get it from MSI_ENDPOINT.
Returns:
An object ID string for a user or service principal.
'''
if access_token is None:
access_token = get_graph_token_from_msi()
endpoint = 'https://' + GRAPH_RESOURCE_HOST + '/v1.0/me/'
headers = {'Authorization': 'Bearer ' + access_token, 'Host': GRAPH_RESOURCE_HOST}
ret = requests.get(endpoint, headers=headers)
return ret.json()['id']
|
python
|
{
"resource": ""
}
|
q6611
|
get_subscription_from_cli
|
train
|
def get_subscription_from_cli(name=None):
'''Get the default, or named, subscription id from CLI's local cache.
Args:
name (str): Optional subscription name. If this is set, the subscription id of the named
subscription is returned from the CLI cache if present. If not set, the subscription id
of the default subscription is returned.
Returns:
Azure subscription ID string.
Requirements:
User has run 'az login' once, or is in Azure Cloud Shell.
'''
home = os.path.expanduser('~')
azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json'
if os.path.isfile(azure_profile_path) is False:
print('Error from get_subscription_from_cli(): Cannot find ' +
azure_profile_path)
return None
with io.open(azure_profile_path, 'r', encoding='utf-8-sig') as azure_profile_fd:
azure_profile = json.load(azure_profile_fd)
for subscription_info in azure_profile['subscriptions']:
if (name is None and subscription_info['isDefault'] is True) or \
subscription_info['name'] == name:
return subscription_info['id']
return None
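
# Illustrative sketch (not part of the original module): pair this with
# get_access_token_from_cli() (defined later in this library) to bootstrap calls against
# the logged-in user's default subscription.
def _example_default_cli_session():
    access_token = get_access_token_from_cli()
    subscription_id = get_subscription_from_cli()
    if access_token is None or subscription_id is None:
        raise RuntimeError("Run 'az login' first, or use Azure Cloud Shell.")
    return access_token, subscription_id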
|
python
|
{
"resource": ""
}
|
q6612
|
list_locations
|
train
|
def list_locations(access_token, subscription_id):
'''List available locations for a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON list of locations.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/locations?api-version=', BASE_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6613
|
create_cosmosdb_account
|
train
|
def create_cosmosdb_account(access_token, subscription_id, rgname, account_name, location,
cosmosdb_kind):
'''Create a new Cosmos DB account in the named resource group, with the named location.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
account_name (str): Name of the new Cosmos DB account.
location (str): Azure data center location. E.g. westus.
cosmosdb_kind (str): Database type. E.g. GlobalDocumentDB.
Returns:
HTTP response. JSON body of storage account properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.DocumentDB/databaseAccounts/', account_name,
'?api-version=', COSMOSDB_API])
cosmosdb_body = {'location': location,
'kind': cosmosdb_kind,
'properties': {'databaseAccountOfferType': 'Standard',
'locations': [{'failoverPriority': 0,
'locationName': location}]}}
body = json.dumps(cosmosdb_body)
return do_put(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6614
|
get_cosmosdb_account_keys
|
train
|
def get_cosmosdb_account_keys(access_token, subscription_id, rgname, account_name):
'''Get the access keys for the specified Cosmos DB account.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
account_name (str): Name of the Cosmos DB account.
Returns:
HTTP response. JSON body of Cosmos DB account keys.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.DocumentDB/databaseAccounts/', account_name,
'/listKeys',
'?api-version=', COSMOSDB_API])
return do_post(endpoint, '', access_token)
|
python
|
{
"resource": ""
}
|
q6615
|
create_keyvault
|
train
|
def create_keyvault(access_token, subscription_id, rgname, vault_name, location,
template_deployment=True, tenant_id=None, object_id=None):
'''Create a new key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the new key vault.
location (str): Azure data center location. E.g. westus2.
template_deployment (boolean): Whether to allow deployment from template.
tenant_id (str): Optionally specify a tenant ID (otherwise picks first response) from
            list_tenants().
object_id (str): Optionally specify an object ID representing user or principal for the
access policy.
Returns:
HTTP response. JSON body of key vault properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
# get tenant ID if not specified
if tenant_id is None:
ret = list_tenants(access_token)
tenant_id = ret['value'][0]['tenantId']
# if object_id is None:
access_policies = [{'tenantId': tenant_id, 'objectId': object_id,
'permissions': {
'keys': ['get', 'create', 'delete', 'list', 'update', 'import',
'backup', 'restore', 'recover'],
'secrets': ['get', 'list', 'set', 'delete', 'backup', 'restore',
'recover'],
'certificates': ['get', 'list', 'delete', 'create', 'import', 'update',
'managecontacts', 'getissuers', 'listissuers',
'setissuers', 'deleteissuers', 'manageissuers',
'recover'],
'storage': ['get', 'list', 'delete', 'set', 'update', 'regeneratekey',
'setsas', 'listsas', 'getsas', 'deletesas']
}}]
vault_properties = {'tenantId': tenant_id, 'sku': {'family': 'A', 'name': 'standard'},
'enabledForTemplateDeployment': template_deployment,
'accessPolicies': access_policies}
vault_body = {'location': location, 'properties': vault_properties}
body = json.dumps(vault_body)
return do_put(endpoint, body, access_token)
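
# Illustrative sketch (not part of the original module): grant the access policy to the
# identity behind a Microsoft Graph token via get_object_id_from_graph() (shown earlier).
# Resource group, vault name and location are placeholders; note that the ARM token and
# the Graph token are two different tokens.
def _example_create_vault_for_current_identity(arm_token, graph_token, subscription_id):
    object_id = get_object_id_from_graph(graph_token)
    return create_keyvault(arm_token, subscription_id, 'my-rg', 'my-vault', 'westus2',
                           object_id=object_id)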
|
python
|
{
"resource": ""
}
|
q6616
|
delete_keyvault
|
train
|
def delete_keyvault(access_token, subscription_id, rgname, vault_name):
'''Deletes a key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
        vault_name (str): Name of the key vault to delete.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
return do_delete(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6617
|
get_keyvault
|
train
|
def get_keyvault(access_token, subscription_id, rgname, vault_name):
'''Gets details about the named key vault.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the key vault.
Returns:
HTTP response. JSON body of key vault properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6618
|
list_keyvaults
|
train
|
def list_keyvaults(access_token, subscription_id, rgname):
'''Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults',
'?api-version=', KEYVAULT_API])
return do_get_next(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6619
|
list_keyvaults_sub
|
train
|
def list_keyvaults_sub(access_token, subscription_id):
'''Lists key vaults belonging to this subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.KeyVault/vaults',
'?api-version=', KEYVAULT_API])
return do_get_next(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6620
|
set_keyvault_secret
|
train
|
def set_keyvault_secret(access_token, vault_uri, secret_name, secret_value):
'''Adds a secret to a key vault using the key vault URI.
Creates a new version if the secret already exists.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.vault.azure.net.
secret_name (str): Name of the secret to add.
secret_value (str): Value of the secret.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([vault_uri,
'/secrets/', secret_name,
'?api-version=', '7.0'])
current_time = datetime.datetime.now().isoformat()
attributes = {'created': current_time,
'enabled': True,
'exp': None,
'nbf': None,
'recoveryLevel': 'Purgeable',
'updated': current_time}
secret_body = {'attributes': attributes,
'contentType': None,
'kid': None,
'managed': None,
'tags': {'file-encoding': 'utf-8'},
'value': secret_value}
body = json.dumps(secret_body)
print(body)
return do_put(endpoint, body, access_token)
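
# Illustrative sketch (not part of the original module): the data-plane vault URI is read
# from the management-plane GET; treating the 'vaultUri' property as present is an
# assumption about the ARM response shape. The Key Vault data-plane token (vault_token)
# is not the same token used for ARM calls.
def _example_store_secret(arm_token, vault_token, subscription_id, rgname, vault_name):
    vault = get_keyvault(arm_token, subscription_id, rgname, vault_name)
    vault_uri = vault['properties']['vaultUri'].rstrip('/')
    return set_keyvault_secret(vault_token, vault_uri, 'my-secret', 's3cr3t-value')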
|
python
|
{
"resource": ""
}
|
q6621
|
delete_keyvault_secret
|
train
|
def delete_keyvault_secret(access_token, vault_uri, secret_name):
'''Deletes a secret from a key vault using the key vault URI.
Args:
access_token (str): A valid Azure authentication token.
        vault_uri (str): Vault URI e.g. https://myvault.vault.azure.net.
        secret_name (str): Name of the secret to delete.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([vault_uri,
'/secrets/', secret_name,
'?api-version=', '7.0'])
return do_delete(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6622
|
create_autoscale_setting
|
train
|
def create_autoscale_setting(access_token, subscription_id, resource_group, setting_name,
vmss_name, location, minval, maxval, default, autoscale_rules,
notify=None):
'''Create a new autoscale setting for a scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
setting_name (str): Name of the autoscale setting.
vmss_name (str): Name of scale set to apply scale events to.
location (str): Azure data center location. E.g. westus.
minval (int): Minimum number of VMs.
maxval (int): Maximum number of VMs.
default (int): Default VM number when no data available.
autoscale_rules (list): List of outputs from create_autoscale_rule().
        notify (str): Optional email address to send autoscale notifications to.
Returns:
HTTP response. JSON body of autoscale setting.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/microsoft.insights/autoscaleSettings/', setting_name,
'?api-version=', INSIGHTS_API])
autoscale_setting = {'location': location}
profile = {'name': 'Profile1'}
capacity = {'minimum': str(minval)}
capacity['maximum'] = str(maxval)
capacity['default'] = str(default)
profile['capacity'] = capacity
profile['rules'] = autoscale_rules
profiles = [profile]
properties = {'name': setting_name}
properties['profiles'] = profiles
properties['targetResourceUri'] = '/subscriptions/' + subscription_id + \
'/resourceGroups/' + resource_group + \
'/providers/Microsoft.Compute/virtualMachineScaleSets/' + vmss_name
properties['enabled'] = True
if notify is not None:
notification = {'operation': 'Scale'}
        email = {'sendToSubscriptionAdministrator': False}
        email['sendToSubscriptionCoAdministrators'] = False
        email['customEmails'] = [notify]
        notification['email'] = email
properties['notifications'] = [notification]
autoscale_setting['properties'] = properties
body = json.dumps(autoscale_setting)
return do_put(endpoint, body, access_token)
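
# Illustrative sketch (not part of the original module): 'rules' is a list built with
# create_autoscale_rule() (part of this library but not shown here); the setting name,
# location, bounds and notification address are placeholders.
def _example_autoscale_between_2_and_10(access_token, subscription_id, resource_group,
                                        vmss_name, rules):
    return create_autoscale_setting(access_token, subscription_id, resource_group,
                                    'my-autoscale-setting', vmss_name, 'westus',
                                    minval=2, maxval=10, default=2, autoscale_rules=rules,
                                    notify='ops@example.com')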
|
python
|
{
"resource": ""
}
|
q6623
|
list_autoscale_settings
|
train
|
def list_autoscale_settings(access_token, subscription_id):
'''List the autoscale settings in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of autoscale settings.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/microsoft.insights/',
'/autoscaleSettings?api-version=', INSIGHTS_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6624
|
list_insights_components
|
train
|
def list_insights_components(access_token, subscription_id, resource_group):
'''List the Microsoft Insights components in a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
Returns:
HTTP response. JSON body of components.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/microsoft.insights/',
'/components?api-version=', INSIGHTS_COMPONENTS_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6625
|
list_metric_defs_for_resource
|
train
|
def list_metric_defs_for_resource(access_token, subscription_id, resource_group,
resource_provider, resource_type, resource_name):
'''List the monitoring metric definitions for a resource.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
resource_provider (str): Type of resource provider.
resource_type (str): Type of resource.
resource_name (str): Name of resource.
Returns:
HTTP response. JSON body of metric definitions.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/', resource_provider,
'/', resource_type,
'/', resource_name,
'/providers/microsoft.insights',
'/metricdefinitions?api-version=', INSIGHTS_METRICS_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6626
|
get_metrics_for_resource
|
train
|
def get_metrics_for_resource(access_token, subscription_id, resource_group, resource_provider,
resource_type, resource_name):
'''Get the monitoring metrics for a resource.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
resource_type (str): Type of resource.
resource_name (str): Name of resource.
Returns:
HTTP response. JSON body of resource metrics.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/', resource_provider,
'/', resource_type,
'/', resource_name,
'/providers/microsoft.insights',
'/metrics?api-version=', INSIGHTS_PREVIEW_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6627
|
get_events_for_subscription
|
train
|
def get_events_for_subscription(access_token, subscription_id, start_timestamp):
    '''Get the insights events for a subscription since the specified timestamp.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
start_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'.
Returns:
HTTP response. JSON body of insights events.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/microsoft.insights/eventtypes/management/values?api-version=',
INSIGHTS_API, '&$filter=eventTimestamp ge \'', start_timestamp, '\''])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6628
|
deploy_template
|
train
|
def deploy_template(access_token, subscription_id, resource_group, deployment_name, template,
parameters):
'''Deploy a template referenced by a JSON string, with parameters as a JSON string.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
deployment_name (str): A name you give to the deployment.
        template (str): String representation of a JSON template body.
parameters (str): String representation of a JSON template parameters body.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.Resources/deployments/', deployment_name,
'?api-version=', DEPLOYMENTS_API])
properties = {'template': template}
properties['mode'] = 'Incremental'
properties['parameters'] = parameters
template_body = {'properties': properties}
body = json.dumps(template_body)
return do_put(endpoint, body, access_token)
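
# Illustrative sketch (not part of the original module): read a template and a parameters
# file from disk and pass them as strings, as the docstring asks. The file names and
# deployment name are placeholders.
def _example_deploy_from_files(access_token, subscription_id, resource_group):
    with open('azuredeploy.json') as template_fd:
        template = template_fd.read()
    with open('azuredeploy.parameters.json') as parameters_fd:
        parameters = parameters_fd.read()
    return deploy_template(access_token, subscription_id, resource_group, 'my-deployment',
                           template, parameters)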
|
python
|
{
"resource": ""
}
|
q6629
|
deploy_template_uri
|
train
|
def deploy_template_uri(access_token, subscription_id, resource_group, deployment_name,
template_uri, parameters):
'''Deploy a template referenced by a URI, with parameters as a JSON string.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
deployment_name (str): A name you give to the deployment.
template_uri (str): URI which points to a JSON template (e.g. github raw location).
parameters (str): String representation of a JSON template parameters body.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.Resources/deployments/', deployment_name,
'?api-version=', DEPLOYMENTS_API])
properties = {'templateLink': {'uri': template_uri}}
properties['mode'] = 'Incremental'
properties['parameters'] = parameters
template_body = {'properties': properties}
body = json.dumps(template_body)
return do_put(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6630
|
deploy_template_uri_param_uri
|
train
|
def deploy_template_uri_param_uri(access_token, subscription_id, resource_group, deployment_name,
template_uri, parameters_uri):
'''Deploy a template with both template and parameters referenced by URIs.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
deployment_name (str): A name you give to the deployment.
template_uri (str): URI which points to a JSON template (e.g. github raw location).
parameters_uri (str): URI which points to a JSON parameters file (e.g. github raw location).
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.Resources/deployments/', deployment_name,
'?api-version=', DEPLOYMENTS_API])
properties = {'templateLink': {'uri': template_uri}}
properties['mode'] = 'Incremental'
properties['parametersLink'] = {'uri': parameters_uri}
template_body = {'properties': properties}
body = json.dumps(template_body)
return do_put(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6631
|
attach_model
|
train
|
def attach_model(subscription, rgname, vmssvm_model, diskname, lun):
'''Attach a data disk to a VMSS VM model'''
disk_id = '/subscriptions/' + subscription + '/resourceGroups/' + rgname + \
'/providers/Microsoft.Compute/disks/' + diskname
disk_model = {'lun': lun, 'createOption': 'Attach', 'caching': 'None',
'managedDisk': {'storageAccountType': 'Standard_LRS', 'id': disk_id}}
vmssvm_model['properties']['storageProfile']['dataDisks'].append(
disk_model)
return vmssvm_model
|
python
|
{
"resource": ""
}
|
q6632
|
detach_model
|
train
|
def detach_model(vmssvm_model, lun):
'''Detach a data disk from a VMSS VM model'''
data_disks = vmssvm_model['properties']['storageProfile']['dataDisks']
data_disks[:] = [disk for disk in data_disks if disk.get('lun') != lun]
vmssvm_model['properties']['storageProfile']['dataDisks'] = data_disks
return vmssvm_model
|
python
|
{
"resource": ""
}
|
q6633
|
get_access_token
|
train
|
def get_access_token(tenant_id, application_id, application_secret):
'''get an Azure access token using the adal library.
Args:
tenant_id (str): Tenant id of the user's account.
application_id (str): Application id of a Service Principal account.
application_secret (str): Application secret (password) of the Service Principal account.
Returns:
An Azure authentication token string.
'''
context = adal.AuthenticationContext(
get_auth_endpoint() + tenant_id, api_version=None)
token_response = context.acquire_token_with_client_credentials(
get_resource_endpoint(), application_id, application_secret)
return token_response.get('accessToken')
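
# Illustrative usage (not part of the original module): authenticate as a service
# principal whose details are kept in environment variables. The variable names below are
# a common convention, not something this library requires.
def _example_token_from_service_principal():
    return get_access_token(tenant_id=os.environ['AZURE_TENANT_ID'],
                            application_id=os.environ['AZURE_CLIENT_ID'],
                            application_secret=os.environ['AZURE_CLIENT_SECRET'])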
|
python
|
{
"resource": ""
}
|
q6634
|
get_access_token_from_cli
|
train
|
def get_access_token_from_cli():
'''Get an Azure authentication token from CLI's cache.
Will only work if CLI local cache has an unexpired auth token (i.e. you ran 'az login'
recently), or if you are running in Azure Cloud Shell (aka cloud console)
Returns:
An Azure authentication token string.
'''
# check if running in cloud shell, if so, pick up token from MSI_ENDPOINT
if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ:
endpoint = os.environ['MSI_ENDPOINT']
headers = {'Metadata': 'true'}
body = {"resource": "https://management.azure.com/"}
ret = requests.post(endpoint, headers=headers, data=body)
return ret.json()['access_token']
else: # not running cloud shell
home = os.path.expanduser('~')
sub_username = ""
# 1st identify current subscription
azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json'
if os.path.isfile(azure_profile_path) is False:
print('Error from get_access_token_from_cli(): Cannot find ' + azure_profile_path)
return None
with codecs.open(azure_profile_path, 'r', 'utf-8-sig') as azure_profile_fd:
subs = json.load(azure_profile_fd)
for sub in subs['subscriptions']:
if sub['isDefault'] == True:
sub_username = sub['user']['name']
if sub_username == "":
print('Error from get_access_token_from_cli(): Default subscription not found in ' + \
azure_profile_path)
return None
        # look for access_token
access_keys_path = home + os.sep + '.azure' + os.sep + 'accessTokens.json'
if os.path.isfile(access_keys_path) is False:
print('Error from get_access_token_from_cli(): Cannot find ' + access_keys_path)
return None
with open(access_keys_path, 'r') as access_keys_fd:
keys = json.load(access_keys_fd)
# loop through accessTokens.json until first unexpired entry found
for key in keys:
if key['userId'] == sub_username:
                if 'accessToken' not in key:
print('Error from get_access_token_from_cli(): accessToken not found in ' + \
access_keys_path)
return None
                if 'tokenType' not in key:
print('Error from get_access_token_from_cli(): tokenType not found in ' + \
access_keys_path)
return None
                if 'expiresOn' not in key:
print('Error from get_access_token_from_cli(): expiresOn not found in ' + \
access_keys_path)
return None
expiry_date_str = key['expiresOn']
# check date and skip past expired entries
if 'T' in expiry_date_str:
exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%dT%H:%M:%S.%fZ')
else:
exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%d %H:%M:%S.%f')
if exp_date < dt.now():
continue
else:
return key['accessToken']
# if dropped out of the loop, token expired
print('Error from get_access_token_from_cli(): token expired. Run \'az login\'')
return None
|
python
|
{
"resource": ""
}
|
q6635
|
list_offers
|
train
|
def list_offers(access_token, subscription_id, location, publisher):
'''List available VM image offers from a publisher.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): Publisher name, e.g. Canonical.
Returns:
HTTP response with JSON list of image offers.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers/', publisher,
'/artifacttypes/vmimage/offers?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6636
|
list_skus
|
train
|
def list_skus(access_token, subscription_id, location, publisher, offer):
'''List available VM image skus for a publisher offer.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): VM image publisher. E.g. MicrosoftWindowsServer.
offer (str): VM image offer. E.g. WindowsServer.
Returns:
HTTP response with JSON list of skus.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers/', publisher,
'/artifacttypes/vmimage/offers/', offer,
'/skus?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6637
|
list_sku_versions
|
train
|
def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku):
'''List available versions for a given publisher's sku.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): VM image publisher. E.g. MicrosoftWindowsServer.
offer (str): VM image offer. E.g. WindowsServer.
sku (str): VM image sku. E.g. 2016-Datacenter.
Returns:
HTTP response with JSON list of versions.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers/', publisher,
'/artifacttypes/vmimage/offers/', offer,
'/skus/', sku,
'/versions?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6638
|
create_container_definition
|
train
|
def create_container_definition(container_name, image, port=80, cpu=1.0, memgb=1.5,
environment=None):
'''Makes a python dictionary of container properties.
Args:
container_name: The name of the container.
image (str): Container image string. E.g. nginx.
port (int): TCP port number. E.g. 8080.
cpu (float): Amount of CPU to allocate to container. E.g. 1.0.
memgb (float): Memory in GB to allocate to container. E.g. 1.5.
environment (list): A list of [{'name':'envname', 'value':'envvalue'}].
Sets environment variables in the container.
Returns:
A Python dictionary of container properties, pass a list of these to
create_container_group().
'''
container = {'name': container_name}
container_properties = {'image': image}
container_properties['ports'] = [{'port': port}]
container_properties['resources'] = {
'requests': {'cpu': cpu, 'memoryInGB': memgb}}
container['properties'] = container_properties
if environment is not None:
container_properties['environmentVariables'] = environment
return container
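
# Illustrative sketch (not part of the original module): build one container definition and
# pass it as a single-element list to create_container_instance_group() (defined later in
# this library). Image, names and location are placeholders.
def _example_run_nginx_group(access_token, subscription_id, resource_group):
    container = create_container_definition('web', 'nginx', port=80, cpu=1.0, memgb=1.5,
                                            environment=[{'name': 'ENV', 'value': 'demo'}])
    return create_container_instance_group(access_token, subscription_id, resource_group,
                                           'nginx-group', [container], 'westus')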
|
python
|
{
"resource": ""
}
|
q6639
|
create_container_instance_group
|
train
|
def create_container_instance_group(access_token, subscription_id, resource_group,
container_group_name, container_list, location,
ostype='Linux', port=80, iptype='public'):
    '''Create a new container group with a list of containers specified by container_list.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_list (list): A list of container properties. Use create_container_definition to
create each container property set.
location (str): Azure data center location. E.g. westus.
ostype (str): Container operating system type. Linux or Windows.
port (int): TCP port number. E.g. 8080.
iptype (str): Type of IP address. E.g. public.
Returns:
HTTP response with JSON body of container group.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups/',
container_group_name,
'?api-version=', CONTAINER_API])
container_group_body = {'location': location}
properties = {'osType': ostype}
properties['containers'] = container_list
ipport = {'protocol': 'TCP'}
ipport['port'] = port
ipaddress = {'ports': [ipport]}
ipaddress['type'] = iptype
properties['ipAddress'] = ipaddress
container_group_body['properties'] = properties
body = json.dumps(container_group_body)
return do_put(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6640
|
delete_container_instance_group
|
train
|
def delete_container_instance_group(access_token, subscription_id, resource_group,
container_group_name):
'''Delete a container group from a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups/',
container_group_name,
'?api-version=', CONTAINER_API])
return do_delete(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6641
|
get_container_instance_group
|
train
|
def get_container_instance_group(access_token, subscription_id, resource_group,
container_group_name):
'''Get the JSON definition of a container group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
Returns:
HTTP response. JSON body of container group.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups/',
container_group_name,
'?api-version=', CONTAINER_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6642
|
get_container_instance_logs
|
train
|
def get_container_instance_logs(access_token, subscription_id, resource_group, container_group_name,
container_name=None):
'''Get the container logs for containers in a container group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_name (str): Optional name of a container in the group.
Returns:
HTTP response. Container logs.
'''
if container_name is None:
container_name = container_group_name
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups/',
container_group_name,
'/containers/', container_name, '/logs?api-version=', CONTAINER_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6643
|
list_container_instance_groups
|
train
|
def list_container_instance_groups(access_token, subscription_id, resource_group):
'''List the container groups in a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
Returns:
HTTP response. JSON list of container groups and their properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups',
'?api-version=', CONTAINER_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6644
|
list_container_instance_groups_sub
|
train
|
def list_container_instance_groups_sub(access_token, subscription_id):
'''List the container groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON list of container groups and their properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.ContainerInstance/ContainerGroups',
'?api-version=', CONTAINER_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6645
|
create_resource_group
|
train
|
def create_resource_group(access_token, subscription_id, rgname, location):
'''Create a resource group in the specified location.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'?api-version=', RESOURCE_API])
rg_body = {'location': location}
body = json.dumps(rg_body)
return do_put(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6646
|
delete_resource_group
|
train
|
def delete_resource_group(access_token, subscription_id, rgname):
'''Delete the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'?api-version=', RESOURCE_API])
return do_delete(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6647
|
export_template
|
train
|
def export_template(access_token, subscription_id, rgname):
'''Capture the specified resource group as a template
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/exportTemplate',
'?api-version=', RESOURCE_API])
rg_body = {'options':'IncludeParameterDefaultValue', 'resources':['*']}
body = json.dumps(rg_body)
return do_post(endpoint, body, access_token)
|
python
|
{
"resource": ""
}
|
q6648
|
get_resource_group
|
train
|
def get_resource_group(access_token, subscription_id, rgname):
'''Get details about the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', rgname,
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6649
|
list_resource_groups
|
train
|
def list_resource_groups(access_token, subscription_id):
'''List the resource groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/',
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
|
python
|
{
"resource": ""
}
|
q6650
|
CacheManager.get
|
train
|
def get(self, requirement):
"""
Get a distribution archive from any of the available caches.
:param requirement: A :class:`.Requirement` object.
:returns: The absolute pathname of a local file or :data:`None` when the
distribution archive is missing from all available caches.
"""
filename = self.generate_filename(requirement)
for backend in list(self.backends):
try:
pathname = backend.get(filename)
if pathname is not None:
return pathname
except CacheBackendDisabledError as e:
logger.debug("Disabling %s because it requires configuration: %s", backend, e)
self.backends.remove(backend)
except Exception as e:
logger.exception("Disabling %s because it failed: %s", backend, e)
self.backends.remove(backend)
|
python
|
{
"resource": ""
}
|
q6651
|
CacheManager.put
|
train
|
def put(self, requirement, handle):
"""
Store a distribution archive in all of the available caches.
:param requirement: A :class:`.Requirement` object.
:param handle: A file-like object that provides access to the
distribution archive.
"""
filename = self.generate_filename(requirement)
for backend in list(self.backends):
handle.seek(0)
try:
backend.put(filename, handle)
except CacheBackendDisabledError as e:
logger.debug("Disabling %s because it requires configuration: %s", backend, e)
self.backends.remove(backend)
except Exception as e:
logger.exception("Disabling %s because it failed: %s", backend, e)
self.backends.remove(backend)
|
python
|
{
"resource": ""
}
|
q6652
|
CacheManager.generate_filename
|
train
|
def generate_filename(self, requirement):
"""
Generate a distribution archive filename for a package.
:param requirement: A :class:`.Requirement` object.
:returns: The filename of the distribution archive (a string)
including a single leading directory component to indicate
the cache format revision.
"""
return FILENAME_PATTERN % (self.config.cache_format_revision,
requirement.name, requirement.version,
get_python_version())
|
python
|
{
"resource": ""
}
|
q6653
|
SystemPackageManager.install_dependencies
|
train
|
def install_dependencies(self, requirement):
"""
Install missing dependencies for the given requirement.
:param requirement: A :class:`.Requirement` object.
:returns: :data:`True` when missing system packages were installed,
:data:`False` otherwise.
:raises: :exc:`.DependencyInstallationRefused` when automatic
installation is disabled or refused by the operator.
:raises: :exc:`.DependencyInstallationFailed` when the installation
of missing system packages fails.
If `pip-accel` fails to build a binary distribution, it will call this
method as a last chance to install missing dependencies. If this
function does not raise an exception, `pip-accel` will retry the build
once.
"""
install_timer = Timer()
missing_dependencies = self.find_missing_dependencies(requirement)
if missing_dependencies:
# Compose the command line for the install command.
install_command = shlex.split(self.install_command) + missing_dependencies
# Prepend `sudo' to the command line?
if not WINDOWS and not is_root():
# FIXME Ideally this should properly detect the presence of `sudo'.
# Or maybe this should just be embedded in the *.ini files?
install_command.insert(0, 'sudo')
# Always suggest the installation command to the operator.
logger.info("You seem to be missing %s: %s",
pluralize(len(missing_dependencies), "dependency", "dependencies"),
concatenate(missing_dependencies))
logger.info("You can install %s with this command: %s",
"it" if len(missing_dependencies) == 1 else "them", " ".join(install_command))
if self.config.auto_install is False:
# Refuse automatic installation and don't prompt the operator when the configuration says no.
self.installation_refused(requirement, missing_dependencies, "automatic installation is disabled")
# Get the operator's permission to install the missing package(s).
if self.config.auto_install:
logger.info("Got permission to install %s (via auto_install option).",
pluralize(len(missing_dependencies), "dependency", "dependencies"))
elif self.confirm_installation(requirement, missing_dependencies, install_command):
logger.info("Got permission to install %s (via interactive prompt).",
pluralize(len(missing_dependencies), "dependency", "dependencies"))
else:
logger.error("Refused installation of missing %s!",
"dependency" if len(missing_dependencies) == 1 else "dependencies")
self.installation_refused(requirement, missing_dependencies, "manual installation was refused")
if subprocess.call(install_command) == 0:
logger.info("Successfully installed %s in %s.",
pluralize(len(missing_dependencies), "dependency", "dependencies"),
install_timer)
return True
else:
logger.error("Failed to install %s.",
pluralize(len(missing_dependencies), "dependency", "dependencies"))
msg = "Failed to install %s required by Python package %s! (%s)"
raise DependencyInstallationFailed(msg % (pluralize(len(missing_dependencies),
"system package", "system packages"),
requirement.name,
concatenate(missing_dependencies)))
return False
|
python
|
{
"resource": ""
}
|
q6654
|
SystemPackageManager.find_missing_dependencies
|
train
|
def find_missing_dependencies(self, requirement):
"""
Find missing dependencies of a Python package.
:param requirement: A :class:`.Requirement` object.
:returns: A list of strings with system package names.
"""
known_dependencies = self.find_known_dependencies(requirement)
if known_dependencies:
installed_packages = self.find_installed_packages()
logger.debug("Checking for missing dependencies of %s ..", requirement.name)
missing_dependencies = sorted(set(known_dependencies).difference(installed_packages))
if missing_dependencies:
logger.debug("Found %s: %s",
pluralize(len(missing_dependencies), "missing dependency", "missing dependencies"),
concatenate(missing_dependencies))
else:
logger.info("All known dependencies are already installed.")
return missing_dependencies
|
python
|
{
"resource": ""
}
|
q6655
|
SystemPackageManager.find_known_dependencies
|
train
|
def find_known_dependencies(self, requirement):
"""
Find the known dependencies of a Python package.
:param requirement: A :class:`.Requirement` object.
:returns: A list of strings with system package names.
"""
logger.info("Checking for known dependencies of %s ..", requirement.name)
known_dependencies = sorted(self.dependencies.get(requirement.name.lower(), []))
if known_dependencies:
logger.info("Found %s: %s", pluralize(len(known_dependencies), "known dependency", "known dependencies"),
concatenate(known_dependencies))
else:
logger.info("No known dependencies... Maybe you have a suggestion?")
return known_dependencies
|
python
|
{
"resource": ""
}
|
q6656
|
SystemPackageManager.find_installed_packages
|
train
|
def find_installed_packages(self):
"""
Find the installed system packages.
:returns: A list of strings with system package names.
:raises: :exc:`.SystemDependencyError` when the command to list the
installed system packages fails.
"""
list_command = subprocess.Popen(self.list_command, shell=True, stdout=subprocess.PIPE)
stdout, stderr = list_command.communicate()
if list_command.returncode != 0:
raise SystemDependencyError("The command to list the installed system packages failed! ({command})",
command=self.list_command)
installed_packages = sorted(stdout.decode().split())
logger.debug("Found %i installed system package(s): %s", len(installed_packages), installed_packages)
return installed_packages
|
python
|
{
"resource": ""
}
|
q6657
|
SystemPackageManager.confirm_installation
|
train
|
def confirm_installation(self, requirement, missing_dependencies, install_command):
"""
Ask the operator's permission to install missing system packages.
:param requirement: A :class:`.Requirement` object.
:param missing_dependencies: A list of strings with missing dependencies.
:param install_command: A list of strings with the command line needed
to install the missing dependencies.
:raises: :exc:`.DependencyInstallationRefused` when the operator refuses.
"""
try:
return prompt_for_confirmation(format(
"Do you want me to install %s %s?",
"this" if len(missing_dependencies) == 1 else "these",
"dependency" if len(missing_dependencies) == 1 else "dependencies",
), default=True)
except KeyboardInterrupt:
# Control-C is a negative response but doesn't
# otherwise interrupt the program flow.
return False
|
python
|
{
"resource": ""
}
|
q6658
|
compact
|
train
|
def compact(text, **kw):
"""
Compact whitespace in a string and format any keyword arguments into the string.
:param text: The text to compact (a string).
:param kw: Any keyword arguments to apply using :func:`str.format()`.
:returns: The compacted, formatted string.
The whitespace compaction preserves paragraphs.
"""
return '\n\n'.join(' '.join(p.split()) for p in text.split('\n\n')).format(**kw)
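
# Illustrative usage (not part of the original function): whitespace inside each paragraph
# is collapsed, the blank line between paragraphs is kept, and keyword arguments are
# formatted into the result.
def _example_compact():
    message = compact("""
        Hello {name}!

        How are you?
    """, name='world')
    assert message == 'Hello world!\n\nHow are you?'
    return message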
|
python
|
{
"resource": ""
}
|
q6659
|
expand_path
|
train
|
def expand_path(pathname):
"""
Expand the home directory in a pathname based on the effective user id.
:param pathname: A pathname that may start with ``~/``, indicating the path
should be interpreted as being relative to the home
directory of the current (effective) user.
:returns: The (modified) pathname.
This function is a variant of :func:`os.path.expanduser()` that doesn't use
``$HOME`` but instead uses the home directory of the effective user id.
This is basically a workaround for ``sudo -s`` not resetting ``$HOME``.
"""
# The following logic previously used regular expressions but that approach
# turned out to be very error prone, hence the current contraption based on
# direct string manipulation :-).
home_directory = find_home_directory()
separators = set([os.sep])
if os.altsep is not None:
separators.add(os.altsep)
if len(pathname) >= 2 and pathname[0] == '~' and pathname[1] in separators:
pathname = os.path.join(home_directory, pathname[2:])
# Also expand environment variables.
return parse_path(pathname)
|
python
|
{
"resource": ""
}
|
q6660
|
find_home_directory
|
train
|
def find_home_directory():
"""
Look up the home directory of the effective user id.
:returns: The pathname of the home directory (a string).
.. note:: On Windows this uses the ``%APPDATA%`` environment variable (if
available) and otherwise falls back to ``~/Application Data``.
"""
if WINDOWS:
directory = os.environ.get('APPDATA')
if not directory:
directory = os.path.expanduser(r'~\Application Data')
else:
# This module isn't available on Windows so we have to import it here.
import pwd
# Look up the home directory of the effective user id so we can
# generate pathnames relative to the home directory.
entry = pwd.getpwuid(os.getuid())
directory = entry.pw_dir
return directory
|
python
|
{
"resource": ""
}
|
q6661
|
same_directories
|
train
|
def same_directories(path1, path2):
"""
Check if two pathnames refer to the same directory.
:param path1: The first pathname (a string).
:param path2: The second pathname (a string).
:returns: :data:`True` if both pathnames refer to the same directory,
:data:`False` otherwise.
"""
if all(os.path.isdir(p) for p in (path1, path2)):
try:
return os.path.samefile(path1, path2)
except AttributeError:
# On Windows and Python 2 os.path.samefile() is unavailable.
return os.path.realpath(path1) == os.path.realpath(path2)
else:
return False
|
python
|
{
"resource": ""
}
|
q6662
|
hash_files
|
train
|
def hash_files(method, *files):
"""
Calculate the hexadecimal digest of one or more local files.
:param method: The hash method (a string, given to :func:`hashlib.new()`).
:param files: The pathname(s) of file(s) to hash (zero or more strings).
:returns: The calculated hex digest (a string).
"""
context = hashlib.new(method)
for filename in files:
with open(filename, 'rb') as handle:
while True:
chunk = handle.read(4096)
if not chunk:
break
context.update(chunk)
return context.hexdigest()
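
# Illustrative usage (not part of the original function): hash two files together with
# SHA-1; any algorithm name accepted by hashlib.new() works. The file names are placeholders.
def _example_requirements_digest():
    return hash_files('sha1', 'requirements.txt', 'constraints.txt')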
|
python
|
{
"resource": ""
}
|
q6663
|
requirement_is_installed
|
train
|
def requirement_is_installed(expr):
"""
Check whether a requirement is installed.
:param expr: A requirement specification similar to those used in pip
requirement files (a string).
:returns: :data:`True` if the requirement is available (installed),
:data:`False` otherwise.
"""
required_dist = next(parse_requirements(expr))
try:
installed_dist = get_distribution(required_dist.key)
return installed_dist in required_dist
except DistributionNotFound:
return False
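
# Illustrative usage (not part of the original function): the argument uses the same
# specifier syntax as pip requirement files.
def _example_have_recent_requests():
    return requirement_is_installed('requests >= 2.0')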
|
python
|
{
"resource": ""
}
|
q6664
|
match_option
|
train
|
def match_option(argument, short_option, long_option):
"""
Match a command line argument against a short and long option.
:param argument: The command line argument (a string).
:param short_option: The short option (a string).
:param long_option: The long option (a string).
:returns: :data:`True` if the argument matches, :data:`False` otherwise.
"""
return short_option[1] in argument[1:] if is_short_option(argument) else argument == long_option
|
python
|
{
"resource": ""
}
|
q6665
|
match_option_with_value
|
train
|
def match_option_with_value(arguments, option, value):
"""
Check if a list of command line options contains an option with a value.
:param arguments: The command line arguments (a list of strings).
:param option: The long option (a string).
:param value: The expected value (a string).
:returns: :data:`True` if the command line contains the option/value pair,
:data:`False` otherwise.
"""
return ('%s=%s' % (option, value) in arguments or
contains_sublist(arguments, [option, value]))
|
python
|
{
"resource": ""
}
|
q6666
|
fromfile
|
train
|
def fromfile(file, dtype, count, *args, **kwargs):
"""Wrapper around np.fromfile to support any file-like object."""
try:
return numpy.fromfile(file, dtype=dtype, count=count, *args, **kwargs)
except (TypeError, IOError):
return numpy.frombuffer(file.read(count * numpy.dtype(dtype).itemsize),
dtype=dtype, count=count, *args, **kwargs)
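
# Minimal usage sketch (added for illustration; not part of the original source).
# A BytesIO object has no usable fileno(), so numpy.fromfile() fails and the
# frombuffer() fallback is exercised instead.
if __name__ == '__main__':
    import io
    raw = numpy.arange(4, dtype='<f4').tobytes()
    print(fromfile(io.BytesIO(raw), '<f4', 4))  # expected: [0. 1. 2. 3.]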
|
python
|
{
"resource": ""
}
|
q6667
|
parse
|
train
|
def parse(path, meta_data_only=False, compensate=False, channel_naming='$PnS',
reformat_meta=False, data_set=0, dtype='float32', encoding="utf-8"):
"""Parse an fcs file at the location specified by the path.
Parameters
----------
path: str
Path of .fcs file
    meta_data_only: bool
        If True, only the meta_data (the TEXT segment of the FCS file) is returned.
    compensate: bool
        Not implemented; if set to True a ParserFeatureNotImplementedError is raised.
    output_format: 'DataFrame' | 'ndarray'
        If set to 'DataFrame' the parsed data is returned as a pandas DataFrame,
        otherwise it is returned as a raw numpy array.
channel_naming: '$PnS' | '$PnN'
Determines which meta data field is used for naming the channels.
The default should be $PnS (even though it is not guaranteed to be unique)
$PnN stands for the short name (guaranteed to be unique).
Will look like 'FL1-H'
$PnS stands for the actual name (not guaranteed to be unique).
Will look like 'FSC-H' (Forward scatter)
        The chosen field will be used to populate self.channels
Note: These names are not flipped in the implementation.
It looks like they were swapped for some reason in the official FCS specification.
reformat_meta: bool
If true, the meta data is reformatted with the channel information organized
into a DataFrame and moved into the '_channels_' key
data_set: int
Index of retrieved data set in the fcs file.
This value specifies the data set being retrieved from an fcs file with multiple data sets.
dtype: str | None
If provided, will force convert all data into this dtype.
This is set by default to auto-convert to float32 to deal with cases in which the original
        data has been stored using a smaller data type (e.g., uint8). This modifies the original
data, but should make follow up analysis safer in basically all cases.
encoding: str
Provide encoding type of the text section.
Returns
-------
if meta_data_only is True:
meta_data: dict
Contains a dictionary with the meta data information
Otherwise:
a 2-tuple with
the first element the meta_data (dictionary)
the second element the data (in either DataFrame or numpy format)
Examples
--------
fname = '../tests/data/EY_2013-05-03_EID_214_PID_1120_Piperacillin_Well_B7.001.fcs'
    meta = parse(fname, meta_data_only=True)
    meta, data_pandas = parse(fname, meta_data_only=False)
"""
if compensate:
raise ParserFeatureNotImplementedError(u'Compensation has not been implemented yet.')
read_data = not meta_data_only
fcs_parser = FCSParser(path, read_data=read_data, channel_naming=channel_naming,
data_set=data_set, encoding=encoding)
if reformat_meta:
fcs_parser.reformat_meta()
meta = fcs_parser.annotation
if meta_data_only:
return meta
else: # Then include both meta and dataframe.
df = fcs_parser.dataframe
df = df.astype(dtype) if dtype else df
return meta, df
|
python
|
{
"resource": ""
}
|
q6668
|
FCSParser.load_file
|
train
|
def load_file(self, file_handle, data_set=0, read_data=True):
"""Load the requested parts of the file into memory."""
file_handle.seek(0, 2)
self._file_size = file_handle.tell()
file_handle.seek(0)
data_segments = 0
# seek the correct data set in fcs
nextdata_offset = 0
while data_segments <= data_set:
self.read_header(file_handle, nextdata_offset)
self.read_text(file_handle)
if '$NEXTDATA' in self.annotation:
data_segments += 1
nextdata_offset = self.annotation['$NEXTDATA']
file_handle.seek(nextdata_offset)
if nextdata_offset == 0 and data_segments < data_set:
                warnings.warn("File does not contain the requested number of data sets.")
break
else:
if data_segments != 0:
warnings.warn('File does not contain $NEXTDATA information.')
break
if read_data:
self.read_data(file_handle)
|
python
|
{
"resource": ""
}
|
q6669
|
FCSParser.from_data
|
train
|
def from_data(cls, data):
"""Load an FCS file from a bytes-like object.
Args:
data: buffer containing contents of an FCS file.
Returns:
FCSParser instance with data loaded
"""
obj = cls()
with contextlib.closing(BytesIO(data)) as file_handle:
obj.load_file(file_handle)
return obj
|
python
|
{
"resource": ""
}
|
q6670
|
FCSParser.read_header
|
train
|
def read_header(self, file_handle, nextdata_offset=0):
"""Read the header of the FCS file.
The header specifies where the annotation, data and analysis are located inside the binary
file.
Args:
file_handle: buffer containing FCS file.
nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA
"""
header = {'FCS format': file_handle.read(6)}
file_handle.read(4) # 4 space characters after the FCS format
for field in ('text start', 'text end', 'data start', 'data end', 'analysis start',
'analysis end'):
s = file_handle.read(8)
try:
field_value = int(s)
except ValueError:
field_value = 0
header[field] = field_value + nextdata_offset
# Checking that the location of the TEXT segment is specified
for k in ('text start', 'text end'):
if header[k] == 0:
raise ValueError(u'The FCS file "{}" seems corrupted. (Parser cannot locate '
u'information about the "{}" segment.)'.format(self.path, k))
elif header[k] > self._file_size:
raise ValueError(u'The FCS file "{}" is corrupted. "{}" segment '
u'is larger than file size'.format(self.path, k))
else:
# All OK
pass
self._data_start = header['data start']
    self._data_end = header['data end']
if header['analysis end'] - header['analysis start'] != 0:
warnings.warn(u'There appears to be some information in the ANALYSIS segment of file '
u'{0}. However, it might not be read correctly.'.format(self.path))
self.annotation['__header__'] = header
|
python
|
{
"resource": ""
}
|
q6671
|
FCSParser._extract_text_dict
|
train
|
def _extract_text_dict(self, raw_text):
"""Parse the TEXT segment of the FCS file into a python dictionary."""
delimiter = raw_text[0]
if raw_text[-1] != delimiter:
raw_text = raw_text.strip()
if raw_text[-1] != delimiter:
msg = (u'The first two characters were:\n {}. The last two characters were: {}\n'
u'Parser expects the same delimiter character in beginning '
u'and end of TEXT segment'.format(raw_text[:2], raw_text[-2:]))
raise ParserFeatureNotImplementedError(msg)
# The delimiter is escaped by being repeated (two consecutive delimiters). This code splits
# on the escaped delimiter first, so there is no need for extra logic to distinguish
# actual delimiters from escaped delimiters.
nested_split_list = [x.split(delimiter) for x in raw_text[1:-1].split(delimiter * 2)]
# 1:-1 above removes the first and last characters which are reserved for the delimiter.
# Flatten the nested list to a list of elements (alternating keys and values)
raw_text_elements = nested_split_list[0]
for partial_element_list in nested_split_list[1:]:
# Rejoin two parts of an element that was split by an escaped delimiter (the end and
# start of two successive sub-lists in nested_split_list)
raw_text_elements[-1] += (delimiter + partial_element_list[0])
raw_text_elements.extend(partial_element_list[1:])
keys, values = raw_text_elements[0::2], raw_text_elements[1::2]
return dict(zip(keys, values))
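
# Worked example (added for illustration; not part of the original source).
# With '/' as the delimiter, a doubled '//' escapes a literal delimiter inside
# a value. Assumes FCSParser() can be instantiated without arguments, as
# from_data() does.
if __name__ == '__main__':
    parser = FCSParser()
    print(parser._extract_text_dict('/$P1N/FSC-H/$CYT/Foo//Bar/'))
    # expected: {'$P1N': 'FSC-H', '$CYT': 'Foo/Bar'}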
|
python
|
{
"resource": ""
}
|
q6672
|
FCSParser.read_text
|
train
|
def read_text(self, file_handle):
"""Parse the TEXT segment of the FCS file.
The TEXT segment contains meta data associated with the FCS file.
    The parsed key/value pairs are merged into self.annotation.
"""
header = self.annotation['__header__'] # For convenience
#####
    # Read in the raw bytes of the TEXT segment of the FCS file.
file_handle.seek(header['text start'], 0)
raw_text = file_handle.read(header['text end'] - header['text start'] + 1)
try:
raw_text = raw_text.decode(self._encoding)
except UnicodeDecodeError as e:
# Catching the exception and logging it in this way kills the traceback, but
# we can worry about this later.
logger.warning(u'Encountered an illegal utf-8 byte in the header.\n Illegal utf-8 '
u'characters will be ignored.\n{}'.format(e))
raw_text = raw_text.decode(self._encoding, errors='ignore')
text = self._extract_text_dict(raw_text)
##
# Extract channel names and convert some of the channel properties
# and other fields into numeric data types (from string)
# Note: do not use regular expressions for manipulations here.
# Regular expressions are too heavy in terms of computation time.
pars = int(text['$PAR'])
if '$P0B' in text.keys(): # Checking whether channel number count starts from 0 or from 1
self.channel_numbers = range(0, pars) # Channel number count starts from 0
else:
self.channel_numbers = range(1, pars + 1) # Channel numbers start from 1
# Extract parameter names
try:
names_n = tuple([text['$P{0}N'.format(i)] for i in self.channel_numbers])
except KeyError:
names_n = []
try:
names_s = tuple([text['$P{0}S'.format(i)] for i in self.channel_numbers])
except KeyError:
names_s = []
self.channel_names_s = names_s
self.channel_names_n = names_n
# Convert some of the fields into integer values
keys_encoding_bits = ['$P{0}B'.format(i) for i in self.channel_numbers]
add_keys_to_convert_to_int = ['$NEXTDATA', '$PAR', '$TOT']
keys_to_convert_to_int = keys_encoding_bits + add_keys_to_convert_to_int
for key in keys_to_convert_to_int:
value = text[key]
text[key] = int(value)
self.annotation.update(text)
# Update data start segments if needed
if self._data_start == 0:
self._data_start = int(text['$BEGINDATA'])
if self._data_end == 0:
self._data_end = int(text['$ENDDATA'])
|
python
|
{
"resource": ""
}
|
q6673
|
FCSParser.read_analysis
|
train
|
def read_analysis(self, file_handle):
"""Read the ANALYSIS segment of the FCS file and store it in self.analysis.
Warning: This has never been tested with an actual fcs file that contains an
analysis segment.
Args:
file_handle: buffer containing FCS data
"""
start = self.annotation['__header__']['analysis start']
end = self.annotation['__header__']['analysis end']
if start != 0 and end != 0:
file_handle.seek(start, 0)
self._analysis = file_handle.read(end - start)
else:
self._analysis = None
|
python
|
{
"resource": ""
}
|
q6674
|
FCSParser._verify_assumptions
|
train
|
def _verify_assumptions(self):
"""Verify that all assumptions made by the parser hold."""
text = self.annotation
keys = text.keys()
if '$MODE' not in text or text['$MODE'] != 'L':
raise ParserFeatureNotImplementedError(u'Mode not implemented')
if '$P0B' in keys:
raise ParserFeatureNotImplementedError(u'Not expecting a parameter starting at 0')
if text['$BYTEORD'] not in ['1,2,3,4', '4,3,2,1', '1,2', '2,1']:
raise ParserFeatureNotImplementedError(u'$BYTEORD {} '
u'not implemented'.format(text['$BYTEORD']))
|
python
|
{
"resource": ""
}
|
q6675
|
FCSParser.get_channel_names
|
train
|
def get_channel_names(self):
"""Get list of channel names. Raises a warning if the names are not unique."""
names_s, names_n = self.channel_names_s, self.channel_names_n
# Figure out which channel names to use
if self._channel_naming == '$PnS':
channel_names, channel_names_alternate = names_s, names_n
else:
channel_names, channel_names_alternate = names_n, names_s
if len(channel_names) == 0:
channel_names = channel_names_alternate
if len(set(channel_names)) != len(channel_names):
msg = (u'The default channel names (defined by the {} '
u'parameter in the FCS file) were not unique. To avoid '
u'problems in downstream analysis, the channel names '
u'have been switched to the alternate channel names '
u'defined in the FCS file. To avoid '
u'seeing this warning message, explicitly instruct '
u'the FCS parser to use the alternate channel names by '
u'specifying the channel_naming parameter.')
msg = msg.format(self._channel_naming)
warnings.warn(msg)
channel_names = channel_names_alternate
return channel_names
|
python
|
{
"resource": ""
}
|
q6676
|
FCSParser.read_data
|
train
|
def read_data(self, file_handle):
"""Read the DATA segment of the FCS file."""
self._verify_assumptions()
text = self.annotation
if (self._data_start > self._file_size) or (self._data_end > self._file_size):
raise ValueError(u'The FCS file "{}" is corrupted. Part of the data segment '
u'is missing.'.format(self.path))
num_events = text['$TOT'] # Number of events recorded
num_pars = text['$PAR'] # Number of parameters recorded
if text['$BYTEORD'].strip() == '1,2,3,4' or text['$BYTEORD'].strip() == '1,2':
endian = '<'
elif text['$BYTEORD'].strip() == '4,3,2,1' or text['$BYTEORD'].strip() == '2,1':
endian = '>'
else:
msg = 'Unrecognized byte order ({})'.format(text['$BYTEORD'])
raise ParserFeatureNotImplementedError(msg)
# dictionary to convert from FCS format to numpy convention
conversion_dict = {'F': 'f', 'D': 'f', 'I': 'u'}
if text['$DATATYPE'] not in conversion_dict.keys():
raise ParserFeatureNotImplementedError('$DATATYPE = {0} is not yet '
'supported.'.format(text['$DATATYPE']))
# Calculations to figure out data types of each of parameters
# $PnB specifies the number of bits reserved for a measurement of parameter n
bytes_per_par_list = [int(text['$P{0}B'.format(i)] / 8) for i in self.channel_numbers]
par_numeric_type_list = [
'{endian}{type}{size}'.format(endian=endian,
type=conversion_dict[text['$DATATYPE']],
size=bytes_per_par)
for bytes_per_par in bytes_per_par_list
]
# Parser for list mode. Here, the order is a list of tuples.
# Each tuple stores event related information
file_handle.seek(self._data_start, 0) # Go to the part of the file where data starts
##
# Read in the data
if len(set(par_numeric_type_list)) > 1:
# This branch deals with files in which the different columns (channels)
# were encoded with different types; i.e., a mixed data format.
dtype = ','.join(par_numeric_type_list)
data = fromfile(file_handle, dtype, num_events)
# The dtypes in the numpy array `data` above are associated with both a name
# and a type; i.e.,
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.recarray.html
# The names are assigned automatically.
# In order for this code to work correctly with the pandas DataFrame constructor,
# we convert the *names* of the dtypes to the channel names we want to use.
names = self.get_channel_names()
if six.PY2:
encoded_names = [name.encode('ascii', errors='replace') for name in names]
        else:  # Python 3: the channel names can be used as-is.
encoded_names = [name for name in names]
data.dtype.names = tuple(encoded_names)
else:
# values saved in a single data format
dtype = par_numeric_type_list[0]
data = fromfile(file_handle, dtype, num_events * num_pars)
data = data.reshape((num_events, num_pars))
##
# Convert to native byte order
# This is needed for working with pandas data structures
native_code = '<' if (sys.byteorder == 'little') else '>'
if endian != native_code:
# swaps the actual bytes and also the endianness
data = data.byteswap().newbyteorder()
self._data = data
|
python
|
{
"resource": ""
}
|
q6677
|
FCSParser.data
|
train
|
def data(self):
"""Get parsed DATA segment of the FCS file."""
if self._data is None:
with open(self.path, 'rb') as f:
self.read_data(f)
return self._data
|
python
|
{
"resource": ""
}
|
q6678
|
FCSParser.analysis
|
train
|
def analysis(self):
"""Get ANALYSIS segment of the FCS file."""
if self._analysis is None:
with open(self.path, 'rb') as f:
self.read_analysis(f)
return self._analysis
|
python
|
{
"resource": ""
}
|
q6679
|
FCSParser.reformat_meta
|
train
|
def reformat_meta(self):
"""Collect the meta data information in a more user friendly format.
Function looks through the meta data, collecting the channel related information into a
dataframe and moving it into the _channels_ key.
"""
meta = self.annotation # For shorthand (passed by reference)
channel_properties = []
for key, value in meta.items():
if key[:3] == '$P1':
if key[3] not in string.digits:
channel_properties.append(key[3:])
# Capture all the channel information in a list of lists -- used to create a data frame
channel_matrix = [
[meta.get('$P{0}{1}'.format(ch, p)) for p in channel_properties]
for ch in self.channel_numbers
]
# Remove this information from the dictionary
for ch in self.channel_numbers:
for p in channel_properties:
key = '$P{0}{1}'.format(ch, p)
if key in meta:
meta.pop(key)
num_channels = meta['$PAR']
column_names = ['$Pn{0}'.format(p) for p in channel_properties]
df = pd.DataFrame(channel_matrix, columns=column_names,
index=(1 + numpy.arange(num_channels)))
if '$PnE' in column_names:
df['$PnE'] = df['$PnE'].apply(lambda x: x.split(','))
df.index.name = 'Channel Number'
meta['_channels_'] = df
meta['_channel_names_'] = self.get_channel_names()
|
python
|
{
"resource": ""
}
|
q6680
|
FCSParser.dataframe
|
train
|
def dataframe(self):
"""Construct Pandas dataframe."""
data = self.data
channel_names = self.get_channel_names()
return pd.DataFrame(data, columns=channel_names)
|
python
|
{
"resource": ""
}
|
q6681
|
BinaryDistributionManager.get_binary_dist
|
train
|
def get_binary_dist(self, requirement):
"""
Get or create a cached binary distribution archive.
:param requirement: A :class:`.Requirement` object.
:returns: An iterable of tuples with two values each: A
:class:`tarfile.TarInfo` object and a file-like object.
Gets the cached binary distribution that was previously built for the
given requirement. If no binary distribution has been cached yet, a new
binary distribution is built and added to the cache.
Uses :func:`build_binary_dist()` to build binary distribution
archives. If this fails with a build error :func:`get_binary_dist()`
will use :class:`.SystemPackageManager` to check for and install
missing system packages and retry the build when missing system
packages were installed.
"""
cache_file = self.cache.get(requirement)
if cache_file:
if self.needs_invalidation(requirement, cache_file):
logger.info("Invalidating old %s binary (source has changed) ..", requirement)
cache_file = None
else:
logger.debug("%s hasn't been cached yet, doing so now.", requirement)
if not cache_file:
# Build the binary distribution.
try:
raw_file = self.build_binary_dist(requirement)
except BuildFailed:
logger.warning("Build of %s failed, checking for missing dependencies ..", requirement)
if self.system_package_manager.install_dependencies(requirement):
raw_file = self.build_binary_dist(requirement)
else:
raise
# Transform the binary distribution archive into a form that we can re-use.
fd, transformed_file = tempfile.mkstemp(prefix='pip-accel-bdist-', suffix='.tar.gz')
try:
archive = tarfile.open(transformed_file, 'w:gz')
try:
for member, from_handle in self.transform_binary_dist(raw_file):
archive.addfile(member, from_handle)
finally:
archive.close()
# Push the binary distribution archive to all available backends.
with open(transformed_file, 'rb') as handle:
self.cache.put(requirement, handle)
finally:
# Close file descriptor before removing the temporary file.
# Without closing Windows is complaining that the file cannot
# be removed because it is used by another process.
os.close(fd)
# Cleanup the temporary file.
os.remove(transformed_file)
# Get the absolute pathname of the file in the local cache.
cache_file = self.cache.get(requirement)
# Enable checksum based cache invalidation.
self.persist_checksum(requirement, cache_file)
archive = tarfile.open(cache_file, 'r:gz')
try:
for member in archive.getmembers():
yield member, archive.extractfile(member.name)
finally:
archive.close()
|
python
|
{
"resource": ""
}
|
q6682
|
BinaryDistributionManager.needs_invalidation
|
train
|
def needs_invalidation(self, requirement, cache_file):
"""
Check whether a cached binary distribution needs to be invalidated.
:param requirement: A :class:`.Requirement` object.
:param cache_file: The pathname of a cached binary distribution (a string).
:returns: :data:`True` if the cached binary distribution needs to be
invalidated, :data:`False` otherwise.
"""
if self.config.trust_mod_times:
return requirement.last_modified > os.path.getmtime(cache_file)
else:
checksum = self.recall_checksum(cache_file)
return checksum and checksum != requirement.checksum
|
python
|
{
"resource": ""
}
|
q6683
|
BinaryDistributionManager.recall_checksum
|
train
|
def recall_checksum(self, cache_file):
"""
Get the checksum of the input used to generate a binary distribution archive.
:param cache_file: The pathname of the binary distribution archive (a string).
:returns: The checksum (a string) or :data:`None` (when no checksum is available).
"""
# EAFP instead of LBYL because of concurrency between pip-accel
# processes (https://docs.python.org/2/glossary.html#term-lbyl).
checksum_file = '%s.txt' % cache_file
try:
with open(checksum_file) as handle:
contents = handle.read()
return contents.strip()
except IOError as e:
if e.errno == errno.ENOENT:
# Gracefully handle missing checksum files.
return None
else:
# Don't swallow exceptions we don't expect!
raise
|
python
|
{
"resource": ""
}
|
q6684
|
BinaryDistributionManager.persist_checksum
|
train
|
def persist_checksum(self, requirement, cache_file):
"""
Persist the checksum of the input used to generate a binary distribution.
:param requirement: A :class:`.Requirement` object.
:param cache_file: The pathname of a cached binary distribution (a string).
.. note:: The checksum is only calculated and persisted when
:attr:`~.Config.trust_mod_times` is :data:`False`.
"""
if not self.config.trust_mod_times:
checksum_file = '%s.txt' % cache_file
with AtomicReplace(checksum_file) as temporary_file:
with open(temporary_file, 'w') as handle:
handle.write('%s\n' % requirement.checksum)
|
python
|
{
"resource": ""
}
|
q6685
|
BinaryDistributionManager.build_binary_dist
|
train
|
def build_binary_dist(self, requirement):
"""
Build a binary distribution archive from an unpacked source distribution.
:param requirement: A :class:`.Requirement` object.
:returns: The pathname of a binary distribution archive (a string).
:raises: :exc:`.BinaryDistributionError` when the original command
and the fall back both fail to produce a binary distribution
archive.
This method uses the following command to build binary distributions:
.. code-block:: sh
$ python setup.py bdist_dumb --format=tar
This command can fail for two main reasons:
1. The package is missing binary dependencies.
2. The ``setup.py`` script doesn't (properly) implement ``bdist_dumb``
binary distribution format support.
The first case is dealt with in :func:`get_binary_dist()`. To deal
with the second case this method falls back to the following command:
.. code-block:: sh
$ python setup.py bdist
This fall back is almost never needed, but there are Python packages
out there which require this fall back (this method was added because
the installation of ``Paver==1.2.3`` failed, see `issue 37`_ for
details about that).
.. _issue 37: https://github.com/paylogic/pip-accel/issues/37
"""
try:
return self.build_binary_dist_helper(requirement, ['bdist_dumb', '--format=tar'])
except (BuildFailed, NoBuildOutput):
logger.warning("Build of %s failed, falling back to alternative method ..", requirement)
return self.build_binary_dist_helper(requirement, ['bdist', '--formats=gztar'])
|
python
|
{
"resource": ""
}
|
q6686
|
BinaryDistributionManager.transform_binary_dist
|
train
|
def transform_binary_dist(self, archive_path):
"""
Transform binary distributions into a form that can be cached for future use.
:param archive_path: The pathname of the original binary distribution archive.
:returns: An iterable of tuples with two values each:
1. A :class:`tarfile.TarInfo` object.
2. A file-like object.
This method transforms a binary distribution archive created by
:func:`build_binary_dist()` into a form that can be cached for future
use. This comes down to making the pathnames inside the archive
relative to the `prefix` that the binary distribution was built for.
"""
# Copy the tar archive file by file so we can rewrite the pathnames.
logger.debug("Transforming binary distribution: %s.", archive_path)
archive = tarfile.open(archive_path, 'r')
for member in archive.getmembers():
# Some source distribution archives on PyPI that are distributed as ZIP
# archives contain really weird permissions: the world readable bit is
# missing. I've encountered this with the httplib2 (0.9) and
# google-api-python-client (1.2) packages. I assume this is a bug of
# some kind in the packaging process on "their" side.
if member.mode & stat.S_IXUSR:
# If the owner has execute permissions we'll give everyone read and
# execute permissions (only the owner gets write permissions).
member.mode = 0o755
else:
# If the owner doesn't have execute permissions we'll give everyone
# read permissions (only the owner gets write permissions).
member.mode = 0o644
# In my testing the `dumb' tar files created with the `python
# setup.py bdist' and `python setup.py bdist_dumb' commands contain
# pathnames that are relative to `/' in one way or another:
#
# - In almost all cases the pathnames look like this:
#
# ./home/peter/.virtualenvs/pip-accel/lib/python2.7/site-packages/pip_accel/__init__.py
#
# - After working on pip-accel for several years I encountered
# a pathname like this (Python 2.6 on Mac OS X 10.10.5):
#
# Users/peter/.virtualenvs/pip-accel/lib/python2.6/site-packages/pip_accel/__init__.py
#
# Both of the above pathnames are relative to `/' but in different
# ways :-). The following normpath(join('/', ...))) pathname
# manipulation logic is intended to handle both cases.
original_pathname = member.name
absolute_pathname = os.path.normpath(os.path.join('/', original_pathname))
if member.isdev():
            logger.warning("Ignoring device file: %s.", absolute_pathname)
elif not member.isdir():
modified_pathname = os.path.relpath(absolute_pathname, self.config.install_prefix)
if os.path.isabs(modified_pathname):
                logger.warning("Failed to transform pathname in binary distribution"
                               " to relative path! (original: %r, modified: %r)",
                               original_pathname, modified_pathname)
else:
# Rewrite /usr/local to /usr (same goes for all prefixes of course).
modified_pathname = re.sub('^local/', '', modified_pathname)
# Rewrite /dist-packages/ to /site-packages/. For details see
# https://wiki.debian.org/Python#Deviations_from_upstream.
if self.config.on_debian:
modified_pathname = modified_pathname.replace('/dist-packages/', '/site-packages/')
# Enable operators to debug the transformation process.
logger.debug("Transformed %r -> %r.", original_pathname, modified_pathname)
# Get the file data from the input archive.
handle = archive.extractfile(original_pathname)
# Yield the modified metadata and a handle to the data.
member.name = modified_pathname
yield member, handle
archive.close()
|
python
|
{
"resource": ""
}
|
q6687
|
BinaryDistributionManager.install_binary_dist
|
train
|
def install_binary_dist(self, members, virtualenv_compatible=True, prefix=None,
python=None, track_installed_files=False):
"""
Install a binary distribution into the given prefix.
:param members: An iterable of tuples with two values each:
1. A :class:`tarfile.TarInfo` object.
2. A file-like object.
:param prefix: The "prefix" under which the requirements should be
installed. This will be a pathname like ``/usr``,
``/usr/local`` or the pathname of a virtual environment.
Defaults to :attr:`.Config.install_prefix`.
:param python: The pathname of the Python executable to use in the shebang
line of all executable Python scripts inside the binary
distribution. Defaults to :attr:`.Config.python_executable`.
:param virtualenv_compatible: Whether to enable workarounds to make the
resulting filenames compatible with
virtual environments (defaults to
:data:`True`).
:param track_installed_files: If this is :data:`True` (not the default for
this method because of backwards
compatibility) pip-accel will create
``installed-files.txt`` as required by
pip to properly uninstall packages.
This method installs a binary distribution created by
:class:`build_binary_dist()` into the given prefix (a directory like
``/usr``, ``/usr/local`` or a virtual environment).
"""
# TODO This is quite slow for modules like Django. Speed it up! Two choices:
# 1. Run the external tar program to unpack the archive. This will
# slightly complicate the fixing up of hashbangs.
# 2. Using links? The plan: We can maintain a "seed" environment under
# $PIP_ACCEL_CACHE and use symbolic and/or hard links to populate other
# places based on the "seed" environment.
module_search_path = set(map(os.path.normpath, sys.path))
prefix = os.path.normpath(prefix or self.config.install_prefix)
python = os.path.normpath(python or self.config.python_executable)
installed_files = []
for member, from_handle in members:
pathname = member.name
if virtualenv_compatible:
# Some binary distributions include C header files (see for example
# the greenlet package) however the subdirectory of include/ in a
# virtual environment is a symbolic link to a subdirectory of
# /usr/include/ so we should never try to install C header files
# inside the directory pointed to by the symbolic link. Instead we
# implement the same workaround that pip uses to avoid this
# problem.
pathname = re.sub('^include/', 'include/site/', pathname)
if self.config.on_debian and '/site-packages/' in pathname:
# On Debian based system wide Python installs the /site-packages/
# directory is not in Python's module search path while
# /dist-packages/ is. We try to be compatible with this.
match = re.match('^(.+?)/site-packages', pathname)
if match:
site_packages = os.path.normpath(os.path.join(prefix, match.group(0)))
dist_packages = os.path.normpath(os.path.join(prefix, match.group(1), 'dist-packages'))
if dist_packages in module_search_path and site_packages not in module_search_path:
pathname = pathname.replace('/site-packages/', '/dist-packages/')
pathname = os.path.join(prefix, pathname)
if track_installed_files:
# Track the installed file's absolute pathname.
installed_files.append(pathname)
directory = os.path.dirname(pathname)
if not os.path.isdir(directory):
logger.debug("Creating directory: %s ..", directory)
makedirs(directory)
logger.debug("Creating file: %s ..", pathname)
with open(pathname, 'wb') as to_handle:
contents = from_handle.read()
if contents.startswith(b'#!/'):
contents = self.fix_hashbang(contents, python)
to_handle.write(contents)
os.chmod(pathname, member.mode)
if track_installed_files:
self.update_installed_files(installed_files)
|
python
|
{
"resource": ""
}
|
q6688
|
BinaryDistributionManager.fix_hashbang
|
train
|
def fix_hashbang(self, contents, python):
"""
Rewrite hashbangs_ to use the correct Python executable.
:param contents: The contents of the script whose hashbang should be
fixed (a string).
:param python: The absolute pathname of the Python executable (a
string).
:returns: The modified contents of the script (a string).
.. _hashbangs: http://en.wikipedia.org/wiki/Shebang_(Unix)
"""
lines = contents.splitlines()
if lines:
hashbang = lines[0]
# Get the base name of the command in the hashbang.
executable = os.path.basename(hashbang)
# Deal with hashbangs like `#!/usr/bin/env python'.
executable = re.sub(b'^env ', b'', executable)
# Only rewrite hashbangs that actually involve Python.
if re.match(b'^python(\\d+(\\.\\d+)*)?$', executable):
lines[0] = b'#!' + python.encode('ascii')
logger.debug("Rewriting hashbang %r to %r!", hashbang, lines[0])
contents = b'\n'.join(lines)
return contents
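
# Hypothetical usage sketch (added; not part of the original source). The method
# uses no instance state, so it is invoked through the class with a dummy `self`
# purely for demonstration.
if __name__ == '__main__':
    script = b'#!/usr/bin/env python\nprint("hello")\n'
    fixed = BinaryDistributionManager.fix_hashbang(None, script, '/opt/venv/bin/python')
    print(fixed.splitlines()[0])  # expected: b'#!/opt/venv/bin/python'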
|
python
|
{
"resource": ""
}
|
q6689
|
BinaryDistributionManager.update_installed_files
|
train
|
def update_installed_files(self, installed_files):
"""
Track the files installed by a package so pip knows how to remove the package.
This method is used by :func:`install_binary_dist()` (which collects
the list of installed files for :func:`update_installed_files()`).
:param installed_files: A list of absolute pathnames (strings) with the
files that were just installed.
"""
# Find the *.egg-info directory where installed-files.txt should be created.
pkg_info_files = [fn for fn in installed_files if fnmatch.fnmatch(fn, '*.egg-info/PKG-INFO')]
# I'm not (yet) sure how reliable the above logic is, so for now
# I'll err on the side of caution and only act when the results
# seem to be reliable.
if len(pkg_info_files) != 1:
logger.warning("Not tracking installed files (couldn't reliably determine *.egg-info directory)")
else:
egg_info_directory = os.path.dirname(pkg_info_files[0])
installed_files_path = os.path.join(egg_info_directory, 'installed-files.txt')
logger.debug("Tracking installed files in %s ..", installed_files_path)
with open(installed_files_path, 'w') as handle:
for pathname in installed_files:
handle.write('%s\n' % os.path.relpath(pathname, egg_info_directory))
|
python
|
{
"resource": ""
}
|
q6690
|
Config.available_configuration_files
|
train
|
def available_configuration_files(self):
"""A list of strings with the absolute pathnames of the available configuration files."""
known_files = [GLOBAL_CONFIG, LOCAL_CONFIG, self.environment.get('PIP_ACCEL_CONFIG')]
absolute_paths = [parse_path(pathname) for pathname in known_files if pathname]
return [pathname for pathname in absolute_paths if os.path.isfile(pathname)]
|
python
|
{
"resource": ""
}
|
q6691
|
Config.load_configuration_file
|
train
|
def load_configuration_file(self, configuration_file):
"""
Load configuration defaults from a configuration file.
:param configuration_file: The pathname of a configuration file (a
string).
:raises: :exc:`Exception` when the configuration file cannot be
loaded.
"""
configuration_file = parse_path(configuration_file)
logger.debug("Loading configuration file: %s", configuration_file)
parser = configparser.RawConfigParser()
files_loaded = parser.read(configuration_file)
if len(files_loaded) != 1:
msg = "Failed to load configuration file! (%s)"
raise Exception(msg % configuration_file)
elif not parser.has_section('pip-accel'):
msg = "Missing 'pip-accel' section in configuration file! (%s)"
raise Exception(msg % configuration_file)
else:
self.configuration.update(parser.items('pip-accel'))
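
# Illustrative sketch (added; not part of the original source) of a configuration
# file this method accepts: an INI style file containing a 'pip-accel' section
# whose option names match the ones documented on the properties below, e.g.
#
#   [pip-accel]
#   auto-install = yes
#   trust-mod-times = no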
|
python
|
{
"resource": ""
}
|
q6692
|
Config.get
|
train
|
def get(self, property_name=None, environment_variable=None, configuration_option=None, default=None):
"""
Internal shortcut to get a configuration option's value.
:param property_name: The name of the property that users can set on
the :class:`Config` class (a string).
:param environment_variable: The name of the environment variable (a
string).
:param configuration_option: The name of the option in the
configuration file (a string).
:param default: The default value.
:returns: The value of the environment variable or configuration file
option or the default value.
"""
if self.overrides.get(property_name) is not None:
return self.overrides[property_name]
elif environment_variable and self.environment.get(environment_variable):
return self.environment[environment_variable]
elif self.configuration.get(configuration_option) is not None:
return self.configuration[configuration_option]
else:
return default
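
# Worked example (added; not part of the original source) of the precedence
# implemented above, using the auto_install option defined later in this module:
# an explicit override beats the environment variable, which beats the
# configuration file entry, which beats the default.
#
#   self.overrides = {'auto_install': 'yes'}
#   self.environment = {'PIP_ACCEL_AUTO_INSTALL': 'no'}
#   self.configuration = {'auto-install': 'no'}
#   self.get(property_name='auto_install',
#            environment_variable='PIP_ACCEL_AUTO_INSTALL',
#            configuration_option='auto-install')   # -> 'yes' (the override wins)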
|
python
|
{
"resource": ""
}
|
q6693
|
Config.auto_install
|
train
|
def auto_install(self):
"""
Whether automatic installation of missing system packages is enabled.
:data:`True` if automatic installation of missing system packages is
enabled, :data:`False` if it is disabled, :data:`None` otherwise (in this case
the user will be prompted at the appropriate time).
- Environment variable: ``$PIP_ACCEL_AUTO_INSTALL`` (refer to
:func:`~humanfriendly.coerce_boolean()` for details on how the
value of the environment variable is interpreted)
- Configuration option: ``auto-install`` (also parsed using
:func:`~humanfriendly.coerce_boolean()`)
- Default: :data:`None`
"""
value = self.get(property_name='auto_install',
environment_variable='PIP_ACCEL_AUTO_INSTALL',
configuration_option='auto-install')
if value is not None:
return coerce_boolean(value)
|
python
|
{
"resource": ""
}
|
q6694
|
Config.trust_mod_times
|
train
|
def trust_mod_times(self):
"""
Whether to trust file modification times for cache invalidation.
- Environment variable: ``$PIP_ACCEL_TRUST_MOD_TIMES``
- Configuration option: ``trust-mod-times``
- Default: :data:`True` unless the AppVeyor_ continuous integration
environment is detected (see `issue 62`_).
.. _AppVeyor: http://www.appveyor.com
.. _issue 62: https://github.com/paylogic/pip-accel/issues/62
"""
on_appveyor = coerce_boolean(os.environ.get('APPVEYOR', 'False'))
return coerce_boolean(self.get(property_name='trust_mod_times',
environment_variable='PIP_ACCEL_TRUST_MOD_TIMES',
configuration_option='trust-mod-times',
default=(not on_appveyor)))
|
python
|
{
"resource": ""
}
|
q6695
|
Config.s3_cache_readonly
|
train
|
def s3_cache_readonly(self):
"""
Whether the Amazon S3 bucket is considered read only.
If this is :data:`True` then the Amazon S3 bucket will only be used for
:class:`~pip_accel.caches.s3.S3CacheBackend.get()` operations (all
:class:`~pip_accel.caches.s3.S3CacheBackend.put()` operations will
be disabled).
- Environment variable: ``$PIP_ACCEL_S3_READONLY`` (refer to
:func:`~humanfriendly.coerce_boolean()` for details on how the
value of the environment variable is interpreted)
- Configuration option: ``s3-readonly`` (also parsed using
:func:`~humanfriendly.coerce_boolean()`)
- Default: :data:`False`
For details please refer to the :mod:`pip_accel.caches.s3` module.
"""
return coerce_boolean(self.get(property_name='s3_cache_readonly',
environment_variable='PIP_ACCEL_S3_READONLY',
configuration_option='s3-readonly',
default=False))
|
python
|
{
"resource": ""
}
|
q6696
|
S3CacheBackend.get
|
train
|
def get(self, filename):
"""
Download a distribution archive from the configured Amazon S3 bucket.
:param filename: The filename of the distribution archive (a string).
:returns: The pathname of a distribution archive on the local file
system or :data:`None`.
:raises: :exc:`.CacheBackendError` when any underlying method fails.
"""
timer = Timer()
self.check_prerequisites()
with PatchedBotoConfig():
# Check if the distribution archive is available.
raw_key = self.get_cache_key(filename)
logger.info("Checking if distribution archive is available in S3 bucket: %s", raw_key)
key = self.s3_bucket.get_key(raw_key)
if key is None:
logger.debug("Distribution archive is not available in S3 bucket.")
else:
# Download the distribution archive to the local binary index.
# TODO Shouldn't this use LocalCacheBackend.put() instead of
# implementing the same steps manually?!
logger.info("Downloading distribution archive from S3 bucket ..")
file_in_cache = os.path.join(self.config.binary_cache, filename)
makedirs(os.path.dirname(file_in_cache))
with AtomicReplace(file_in_cache) as temporary_file:
key.get_contents_to_filename(temporary_file)
logger.debug("Finished downloading distribution archive from S3 bucket in %s.", timer)
return file_in_cache
|
python
|
{
"resource": ""
}
|
q6697
|
S3CacheBackend.put
|
train
|
def put(self, filename, handle):
"""
Upload a distribution archive to the configured Amazon S3 bucket.
If the :attr:`~.Config.s3_cache_readonly` configuration option is
enabled this method does nothing.
:param filename: The filename of the distribution archive (a string).
:param handle: A file-like object that provides access to the
distribution archive.
:raises: :exc:`.CacheBackendError` when any underlying method fails.
"""
if self.config.s3_cache_readonly:
logger.info('Skipping upload to S3 bucket (using S3 in read only mode).')
else:
timer = Timer()
self.check_prerequisites()
with PatchedBotoConfig():
from boto.s3.key import Key
raw_key = self.get_cache_key(filename)
logger.info("Uploading distribution archive to S3 bucket: %s", raw_key)
key = Key(self.s3_bucket)
key.key = raw_key
try:
key.set_contents_from_file(handle)
except Exception as e:
logger.info("Encountered error writing to S3 bucket, "
"falling back to read only mode (exception: %s)", e)
self.config.s3_cache_readonly = True
else:
logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer)
|
python
|
{
"resource": ""
}
|
q6698
|
S3CacheBackend.s3_connection
|
train
|
def s3_connection(self):
"""
Connect to the Amazon S3 API.
If the connection attempt fails because Boto can't find credentials the
attempt is retried once with an anonymous connection.
Called on demand by :attr:`s3_bucket`.
:returns: A :class:`boto.s3.connection.S3Connection` object.
:raises: :exc:`.CacheBackendError` when the connection to the Amazon
S3 API fails.
"""
if not hasattr(self, 'cached_connection'):
self.check_prerequisites()
with PatchedBotoConfig():
import boto
from boto.exception import BotoClientError, BotoServerError, NoAuthHandlerFound
from boto.s3.connection import S3Connection, SubdomainCallingFormat, OrdinaryCallingFormat
try:
# Configure the number of retries and the socket timeout used
# by Boto. Based on the snippet given in the following email:
# https://groups.google.com/d/msg/boto-users/0osmP0cUl5Y/X4NdlMGWKiEJ
if not boto.config.has_section(BOTO_CONFIG_SECTION):
boto.config.add_section(BOTO_CONFIG_SECTION)
boto.config.set(BOTO_CONFIG_SECTION,
BOTO_CONFIG_NUM_RETRIES_OPTION,
str(self.config.s3_cache_retries))
boto.config.set(BOTO_CONFIG_SECTION,
BOTO_CONFIG_SOCKET_TIMEOUT_OPTION,
str(self.config.s3_cache_timeout))
logger.debug("Connecting to Amazon S3 API ..")
endpoint = urlparse(self.config.s3_cache_url)
host, _, port = endpoint.netloc.partition(':')
kw = dict(
host=host,
port=int(port) if port else None,
is_secure=(endpoint.scheme == 'https'),
calling_format=(SubdomainCallingFormat() if host == S3Connection.DefaultHost
else OrdinaryCallingFormat()),
)
try:
self.cached_connection = S3Connection(**kw)
except NoAuthHandlerFound:
logger.debug("Amazon S3 API credentials missing, retrying with anonymous connection ..")
self.cached_connection = S3Connection(anon=True, **kw)
except (BotoClientError, BotoServerError):
raise CacheBackendError("""
Failed to connect to the Amazon S3 API! Most likely your
credentials are not correctly configured. The Amazon S3
cache backend will be disabled for now.
""")
return self.cached_connection
|
python
|
{
"resource": ""
}
|
q6699
|
Requirement.wheel_metadata
|
train
|
def wheel_metadata(self):
"""Get the distribution metadata of an unpacked wheel distribution."""
if not self.is_wheel:
raise TypeError("Requirement is not a wheel distribution!")
for distribution in find_distributions(self.source_directory):
return distribution
msg = "pkg_resources didn't find a wheel distribution in %s!"
raise Exception(msg % self.source_directory)
|
python
|
{
"resource": ""
}
|