| column | dtype | range / values |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | lengths 4 to 119 |
| file_path | string | lengths 14 to 227 |
| content | string | lengths 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
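The rows below can also be consumed programmatically. A minimal sketch, assuming the table is exposed as a Hugging Face dataset; the dataset identifier used here is a placeholder, not the real one.

```python
# Sketch only: "org/source-code-dataset" is a hypothetical placeholder id.
from datasets import load_dataset  # pip install datasets

ds = load_dataset("org/source-code-dataset", split="train", streaming=True)

# Filter rows using the schema above, e.g. PowerShell files from well-starred repos.
for row in ds:
    if row["extension"] == ".ps1" and row["repo_stars"] >= 10_000:
        print(row["id"], row["file_path"], row["total_lines"], row["repo_license"])
        break
```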
| id 13,300 | file_name win_uri.py | file_path ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_uri.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Corwin Brown <corwin@corwinbrown.com>
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: win_uri
short_description: Interacts with webservices
description:
- Interacts with FTP, HTTP and HTTPS web services.
- Supports Digest, Basic and WSSE HTTP authentication mechanisms.
- For non-Windows targets, use the M(ansible.builtin.uri) module instead.
options:
url:
description:
- Supports FTP, HTTP or HTTPS URLs in the form of (ftp|http|https)://host.domain:port/path.
type: str
required: yes
content_type:
description:
- Sets the "Content-Type" header.
type: str
body:
description:
- The body of the HTTP request/response to the web service.
type: raw
dest:
description:
- Output the response body to a file.
type: path
creates:
description:
- A filename; when it already exists, this step will be skipped.
type: path
removes:
description:
- A filename; when it does not exist, this step will be skipped.
type: path
return_content:
description:
- Whether or not to return the body of the response as a "content" key in
the dictionary result. If the reported Content-type is
"application/json", then the JSON is additionally loaded into a key
called C(json) in the dictionary results.
type: bool
default: no
status_code:
description:
- A valid, numeric, HTTP status code that signifies success of the request.
- Can also be a comma-separated list of status codes.
type: list
elements: int
default: [ 200 ]
url_method:
default: GET
aliases:
- method
url_timeout:
aliases:
- timeout
# The following are defined in the web_request fragment but the module contains deprecated aliases for backwards compatibility.
url_username:
description:
- The username to use for authentication.
- The aliases I(user) and I(username) are deprecated and will be removed on
the major release after C(2022-07-01).
aliases:
- user
- username
url_password:
description:
- The password for I(url_username).
- The alias I(password) is deprecated and will be removed on the major
release after C(2022-07-01).
aliases:
- password
extends_documentation_fragment:
- ansible.windows.web_request
seealso:
- module: ansible.builtin.uri
- module: ansible.windows.win_get_url
author:
- Corwin Brown (@blakfeld)
- Dag Wieers (@dagwieers)
"""
EXAMPLES = r"""
- name: Perform a GET and Store Output
ansible.windows.win_uri:
url: http://example.com/endpoint
register: http_output
# Set a HOST header to hit an internal webserver:
- name: Hit a Specific Host on the Server
ansible.windows.win_uri:
url: http://example.com/
method: GET
headers:
host: www.somesite.com
- name: Perform a HEAD on an Endpoint
ansible.windows.win_uri:
url: http://www.example.com/
method: HEAD
- name: POST a Body to an Endpoint
ansible.windows.win_uri:
url: http://www.somesite.com/
method: POST
body: "{ 'some': 'json' }"
"""
RETURN = r"""
elapsed:
description: The number of seconds that elapsed while performing the download.
returned: always
type: float
sample: 23.2
url:
description: The Target URL.
returned: always
type: str
sample: https://www.ansible.com
status_code:
description: The HTTP Status Code of the response.
returned: success
type: int
sample: 200
status_description:
description: A summary of the status.
returned: success
type: str
sample: OK
content:
description: The raw content of the HTTP response.
returned: success and return_content is True
type: str
sample: '{"foo": "bar"}'
content_length:
description: The byte size of the response.
returned: success
type: int
sample: 54447
json:
description: The json structure returned under content as a dictionary.
returned: success and Content-Type is "application/json" or "application/javascript" and return_content is True
type: dict
sample: {"this-is-dependent": "on the actual return content"}
"""
| size 4,256 | language Python | extension .py | total_lines 145 | avg_line_length 25.910345 | max_line_length 121 | alphanum_fraction 0.721775 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,301 | file_name win_stat.py | file_path ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_stat
version_added: "1.7"
short_description: Get information about Windows files
description:
- Returns information about a Windows file.
- For non-Windows targets, use the M(stat) module instead.
options:
path:
description:
- The full path of the file/object to get the facts of; both forward and
back slashes are accepted.
type: path
required: yes
aliases: [ dest, name ]
get_checksum:
description:
- Whether to return a checksum of the file (default sha1)
type: bool
default: yes
version_added: "2.1"
checksum_algorithm:
description:
- Algorithm to determine checksum of file.
- Will throw an error if the host is unable to use specified algorithm.
type: str
default: sha1
choices: [ md5, sha1, sha256, sha384, sha512 ]
version_added: "2.3"
follow:
description:
- Whether to follow symlinks or junction points.
- In the case of C(path) pointing to another link, then that will
be followed until no more links are found.
type: bool
default: no
version_added: "2.8"
seealso:
- module: stat
- module: win_acl
- module: win_file
- module: win_owner
author:
- Chris Church (@cchurch)
"""
EXAMPLES = r"""
- name: Obtain information about a file
win_stat:
path: C:\foo.ini
register: file_info
- name: Obtain information about a folder
win_stat:
path: C:\bar
register: folder_info
- name: Get MD5 checksum of a file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: md5
register: md5_checksum
- debug:
var: md5_checksum.stat.checksum
- name: Get SHA1 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
register: sha1_checksum
- debug:
var: sha1_checksum.stat.checksum
- name: Get SHA256 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: sha256
register: sha256_checksum
- debug:
var: sha256_checksum.stat.checksum
"""
RETURN = r"""
changed:
description: Whether anything was changed
returned: always
type: bool
sample: true
stat:
description: dictionary containing all the stat data
returned: success
type: complex
contains:
attributes:
description: Attributes of the file at path in raw form.
returned: success, path exists
type: str
sample: "Archive, Hidden"
checksum:
description: The checksum of a file based on checksum_algorithm specified.
returned: success, path exists, path is a file, get_checksum == True and
checksum_algorithm specified is supported
type: str
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
creationtime:
description: The create time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
exists:
description: If the path exists or not.
returned: success
type: bool
sample: true
extension:
description: The extension of the file at path.
returned: success, path exists, path is a file
type: str
sample: ".ps1"
filename:
description: The name of the file (without path).
returned: success, path exists, path is a file
type: str
sample: foo.ini
hlnk_targets:
description: List of other files pointing to the same file (hard links), excludes the current file.
returned: success, path exists
type: list
sample:
- C:\temp\file.txt
- C:\Windows\update.log
isarchive:
description: If the path is ready for archiving or not.
returned: success, path exists
type: bool
sample: true
isdir:
description: If the path is a directory or not.
returned: success, path exists
type: bool
sample: true
ishidden:
description: If the path is hidden or not.
returned: success, path exists
type: bool
sample: true
isjunction:
description: If the path is a junction point or not.
returned: success, path exists
type: bool
sample: true
islnk:
description: If the path is a symbolic link or not.
returned: success, path exists
type: bool
sample: true
isreadonly:
description: If the path is read only or not.
returned: success, path exists
type: bool
sample: true
isreg:
description: If the path is a regular file.
returned: success, path exists
type: bool
sample: true
isshared:
description: If the path is shared or not.
returned: success, path exists
type: bool
sample: true
lastaccesstime:
description: The last access time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
lastwritetime:
description: The last modification time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
lnk_source:
description: Target of the symlink normalized for the remote filesystem.
returned: success, path exists and the path is a symbolic link or junction point
type: str
sample: C:\temp\link
lnk_target:
description: Target of the symlink. Note that relative paths remain relative.
returned: success, path exists and the path is a symbolic link or junction point
type: str
sample: ..\link
nlink:
description: Number of links to the file (hard links).
returned: success, path exists
type: int
sample: 1
owner:
description: The owner of the file.
returned: success, path exists
type: str
sample: BUILTIN\Administrators
path:
description: The full absolute path to the file.
returned: success, path exists, file exists
type: str
sample: C:\foo.ini
sharename:
description: The name of share if folder is shared.
returned: success, path exists, file is a directory and isshared == True
type: str
sample: file-share
size:
description: The size in bytes of a file or folder.
returned: success, path exists, file is not a link
type: int
sample: 1024
"""
| size 7,577 | language Python | extension .py | total_lines 223 | avg_line_length 24.901345 | max_line_length 111 | alphanum_fraction 0.604277 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,302 | file_name win_copy.ps1 | file_path ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1 |
#!powershell
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#Requires -Module Ansible.ModuleUtils.Legacy
#Requires -Module Ansible.ModuleUtils.Backup
$ErrorActionPreference = 'Stop'
$params = Parse-Args -arguments $args -supports_check_mode $true
$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false
# there are 4 modes to win_copy which are driven by the action plugins:
# explode: src is a zip file which needs to be extracted to dest, for use with multiple files
# query: win_copy action plugin wants to get the state of remote files to check whether it needs to send them
# remote: all copy action is happening remotely (remote_src=True)
# single: a single file has been copied, also used with template
$copy_mode = Get-AnsibleParam -obj $params -name "_copy_mode" -type "str" -default "single" -validateset "explode","query","remote","single"
# used in explode, remote and single mode
$src = Get-AnsibleParam -obj $params -name "src" -type "path" -failifempty ($copy_mode -in @("explode","process","single"))
$dest = Get-AnsibleParam -obj $params -name "dest" -type "path" -failifempty $true
$backup = Get-AnsibleParam -obj $params -name "backup" -type "bool" -default $false
# used in single mode
$original_basename = Get-AnsibleParam -obj $params -name "_original_basename" -type "str"
# used in query and remote mode
$force = Get-AnsibleParam -obj $params -name "force" -type "bool" -default $true
# used in query mode, contains the local files/directories/symlinks that are to be copied
$files = Get-AnsibleParam -obj $params -name "files" -type "list"
$directories = Get-AnsibleParam -obj $params -name "directories" -type "list"
$result = @{
changed = $false
}
if ($diff_mode) {
$result.diff = @{}
}
Function Copy-File($source, $dest) {
$diff = ""
$copy_file = $false
$source_checksum = $null
if ($force) {
$source_checksum = Get-FileChecksum -path $source
}
if (Test-Path -LiteralPath $dest -PathType Container) {
Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': dest is already a folder"
} elseif (Test-Path -LiteralPath $dest -PathType Leaf) {
if ($force) {
$target_checksum = Get-FileChecksum -path $dest
if ($source_checksum -ne $target_checksum) {
$copy_file = $true
}
}
} else {
$copy_file = $true
}
if ($copy_file) {
$file_dir = [System.IO.Path]::GetDirectoryName($dest)
# validate the parent dir is not a file and that it exists
if (Test-Path -LiteralPath $file_dir -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder"
} elseif (-not (Test-Path -LiteralPath $file_dir)) {
# directory doesn't exist, need to create
New-Item -Path $file_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
$diff += "+$file_dir\`n"
}
if ($backup) {
$result.backup_file = Backup-File -path $dest -WhatIf:$check_mode
}
if (Test-Path -LiteralPath $dest -PathType Leaf) {
Remove-Item -LiteralPath $dest -Force -Recurse -WhatIf:$check_mode | Out-Null
$diff += "-$dest`n"
}
if (-not $check_mode) {
# cannot run with -WhatIf:$check_mode; if the parent dir didn't
# exist and was created above, it would still not exist in check mode
Copy-Item -LiteralPath $source -Destination $dest -Force | Out-Null
}
$diff += "+$dest`n"
$result.changed = $true
}
# ugly but to save us from running the checksum twice, let's return it for
# the main code to add it to $result
return ,@{ diff = $diff; checksum = $source_checksum }
}
Function Copy-Folder($source, $dest) {
$diff = ""
if (-not (Test-Path -LiteralPath $dest -PathType Container)) {
$parent_dir = [System.IO.Path]::GetDirectoryName($dest)
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder"
}
if (Test-Path -LiteralPath $dest -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy folder from '$source' to '$dest': dest is already a file"
}
New-Item -Path $dest -ItemType Container -WhatIf:$check_mode | Out-Null
$diff += "+$dest\`n"
$result.changed = $true
}
$child_items = Get-ChildItem -LiteralPath $source -Force
foreach ($child_item in $child_items) {
$dest_child_path = Join-Path -Path $dest -ChildPath $child_item.Name
if ($child_item.PSIsContainer) {
$diff += (Copy-Folder -source $child_item.Fullname -dest $dest_child_path)
} else {
$diff += (Copy-File -source $child_item.Fullname -dest $dest_child_path).diff
}
}
return $diff
}
Function Get-FileSize($path) {
$file = Get-Item -LiteralPath $path -Force
if ($file.PSIsContainer) {
$size = (Get-ChildItem -Literalpath $file.FullName -Recurse -Force | `
Where-Object { $_.PSObject.Properties.Name -contains 'Length' } | `
Measure-Object -Property Length -Sum).Sum
if ($null -eq $size) {
$size = 0
}
} else {
$size = $file.Length
}
$size
}
Function Extract-Zip($src, $dest) {
$archive = [System.IO.Compression.ZipFile]::Open($src, [System.IO.Compression.ZipArchiveMode]::Read, [System.Text.Encoding]::UTF8)
foreach ($entry in $archive.Entries) {
$archive_name = $entry.FullName
# FullName may be appended with / or \, determine if it is padded and remove it
$padding_length = $archive_name.Length % 4
if ($padding_length -eq 0) {
$is_dir = $false
$base64_name = $archive_name
} elseif ($padding_length -eq 1) {
$is_dir = $true
if ($archive_name.EndsWith("/") -or $archive_name.EndsWith("`\")) {
$base64_name = $archive_name.Substring(0, $archive_name.Length - 1)
} else {
throw "invalid base64 archive name '$archive_name'"
}
} else {
throw "invalid base64 length '$archive_name'"
}
# to handle unicode characters, the win_copy action plugin has encoded the filename
$decoded_archive_name = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($base64_name))
# re-add the / to the entry full name if it was a directory
if ($is_dir) {
$decoded_archive_name = "$decoded_archive_name/"
}
$entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_name)
$entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path)
if (-not (Test-Path -LiteralPath $entry_dir)) {
New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
}
if ($is_dir -eq $false) {
if (-not $check_mode) {
[System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $entry_target_path, $true)
}
}
}
$archive.Dispose() # release the handle of the zip file
}
Function Extract-ZipLegacy($src, $dest) {
if (-not (Test-Path -LiteralPath $dest)) {
New-Item -Path $dest -ItemType Directory -WhatIf:$check_mode | Out-Null
}
$shell = New-Object -ComObject Shell.Application
$zip = $shell.NameSpace($src)
$dest_path = $shell.NameSpace($dest)
foreach ($entry in $zip.Items()) {
$is_dir = $entry.IsFolder
$encoded_archive_entry = $entry.Name
# to handle unicode characters, the win_copy action plugin has encoded the filename
$decoded_archive_entry = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($encoded_archive_entry))
if ($is_dir) {
$decoded_archive_entry = "$decoded_archive_entry/"
}
$entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_entry)
$entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path)
if (-not (Test-Path -LiteralPath $entry_dir)) {
New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
}
if ($is_dir -eq $false -and (-not $check_mode)) {
# https://msdn.microsoft.com/en-us/library/windows/desktop/bb787866.aspx
# From Folder.CopyHere documentation, 1044 means:
# - 1024: do not display a user interface if an error occurs
# - 16: respond with "yes to all" for any dialog box that is displayed
# - 4: do not display a progress dialog box
$dest_path.CopyHere($entry, 1044)
# once file is extracted, we need to rename it with non base64 name
$combined_encoded_path = [System.IO.Path]::Combine($dest, $encoded_archive_entry)
Move-Item -LiteralPath $combined_encoded_path -Destination $entry_target_path -Force | Out-Null
}
}
}
if ($copy_mode -eq "query") {
# we only return a list of files/directories that need to be copied over
# the source of the local file will be the key used
$changed_files = @()
$changed_directories = @()
$changed_symlinks = @()
foreach ($file in $files) {
$filename = $file.dest
$local_checksum = $file.checksum
$filepath = Join-Path -Path $dest -ChildPath $filename
if (Test-Path -LiteralPath $filepath -PathType Leaf) {
if ($force) {
$checksum = Get-FileChecksum -path $filepath
if ($checksum -ne $local_checksum) {
$changed_files += $file
}
}
} elseif (Test-Path -LiteralPath $filepath -PathType Container) {
Fail-Json -obj $result -message "cannot copy file to dest '$filepath': object at path is already a directory"
} else {
$changed_files += $file
}
}
foreach ($directory in $directories) {
$dirname = $directory.dest
$dirpath = Join-Path -Path $dest -ChildPath $dirname
$parent_dir = [System.IO.Path]::GetDirectoryName($dirpath)
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at parent directory path is already a file"
}
if (Test-Path -LiteralPath $dirpath -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at path is already a file"
} elseif (-not (Test-Path -LiteralPath $dirpath -PathType Container)) {
$changed_directories += $directory
}
}
# TODO: Handle symlinks
$result.files = $changed_files
$result.directories = $changed_directories
$result.symlinks = $changed_symlinks
} elseif ($copy_mode -eq "explode") {
# a single zip file containing the files and directories needs to be
# expanded; this will always result in a change as the calculation is done
# on the win_copy action plugin and is only run if a change needs to occur
if (-not (Test-Path -LiteralPath $src -PathType Leaf)) {
Fail-Json -obj $result -message "Cannot expand src zip file: '$src' as it does not exist"
}
# Detect if the PS zip assemblies are available or whether to use Shell
$use_legacy = $false
try {
Add-Type -AssemblyName System.IO.Compression.FileSystem | Out-Null
Add-Type -AssemblyName System.IO.Compression | Out-Null
} catch {
$use_legacy = $true
}
if ($use_legacy) {
Extract-ZipLegacy -src $src -dest $dest
} else {
Extract-Zip -src $src -dest $dest
}
$result.changed = $true
} elseif ($copy_mode -eq "remote") {
# all copy actions are happening on the remote side (windows host), need
# to copy source and dest using PS code
$result.src = $src
$result.dest = $dest
if (-not (Test-Path -LiteralPath $src)) {
Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist"
}
if (Test-Path -LiteralPath $src -PathType Container) {
# we are copying a directory or the contents of a directory
$result.operation = 'folder_copy'
if ($src.EndsWith("/") -or $src.EndsWith("`\")) {
# copying the folder's contents to dest
$diff = ""
$child_files = Get-ChildItem -LiteralPath $src -Force
foreach ($child_file in $child_files) {
$dest_child_path = Join-Path -Path $dest -ChildPath $child_file.Name
if ($child_file.PSIsContainer) {
$diff += Copy-Folder -source $child_file.FullName -dest $dest_child_path
} else {
$diff += (Copy-File -source $child_file.FullName -dest $dest_child_path).diff
}
}
} else {
# copying the folder and its contents to dest
$dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name
$result.dest = $dest
$diff = Copy-Folder -source $src -dest $dest
}
} else {
# we are just copying a single file to dest
$result.operation = 'file_copy'
$source_basename = (Get-Item -LiteralPath $src -Force).Name
$result.original_basename = $source_basename
if ($dest.EndsWith("/") -or $dest.EndsWith("`\")) {
$dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name
$result.dest = $dest
} else {
# check if the parent dir exists, this is only done if src is a
# file and dest is the path to a file (doesn't end with \ or /)
$parent_dir = Split-Path -LiteralPath $dest
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
} elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist"
}
}
$copy_result = Copy-File -source $src -dest $dest
$diff = $copy_result.diff
$result.checksum = $copy_result.checksum
}
# the file might not exist if running in check mode
if (-not $check_mode -or (Test-Path -LiteralPath $dest -PathType Leaf)) {
$result.size = Get-FileSize -path $dest
} else {
$result.size = $null
}
if ($diff_mode) {
$result.diff.prepared = $diff
}
} elseif ($copy_mode -eq "single") {
# a single file is located in src and we need to copy to dest, this will
# always result in a change as the calculation is done on the Ansible side
# before this is run. This should also never run in check mode
if (-not (Test-Path -LiteralPath $src -PathType Leaf)) {
Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist"
}
# the dest parameter is a directory, we need to append original_basename
if ($dest.EndsWith("/") -or $dest.EndsWith("`\") -or (Test-Path -LiteralPath $dest -PathType Container)) {
$remote_dest = Join-Path -Path $dest -ChildPath $original_basename
$parent_dir = Split-Path -LiteralPath $remote_dest
# when dest ends with /, we need to create the destination directories
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
} elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
New-Item -Path $parent_dir -ItemType Directory | Out-Null
}
} else {
$remote_dest = $dest
$parent_dir = Split-Path -LiteralPath $remote_dest
# check if the dest parent dirs exist, need to fail if they don't
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
} elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist"
}
}
if ($backup) {
$result.backup_file = Backup-File -path $remote_dest -WhatIf:$check_mode
}
Copy-Item -LiteralPath $src -Destination $remote_dest -Force | Out-Null
$result.changed = $true
}
Exit-Json -obj $result
| size 17,087 | language Python | extension .py | total_lines 346 | avg_line_length 41.179191 | max_line_length 140 | alphanum_fraction 0.627847 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,303 | file_name win_ping.py | file_path ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_ping
version_added: "1.7"
short_description: A windows version of the classic ping module
description:
- Checks management connectivity of a windows host.
- This is NOT ICMP ping, this is just a trivial test module.
- For non-Windows targets, use the M(ping) module instead.
- For Network targets, use the M(net_ping) module instead.
options:
data:
description:
- Alternate data to return instead of 'pong'.
- If this parameter is set to C(crash), the module will cause an exception.
type: str
default: pong
seealso:
- module: ping
author:
- Chris Church (@cchurch)
"""
EXAMPLES = r"""
# Test connectivity to a windows host
# ansible winserver -m win_ping
- name: Example from an Ansible Playbook
win_ping:
- name: Induce an exception to see what happens
win_ping:
data: crash
"""
RETURN = r"""
ping:
description: Value provided with the data parameter.
returned: success
type: str
sample: pong
"""
| size 1,451 | language Python | extension .py | total_lines 47 | avg_line_length 27.617021 | max_line_length 92 | alphanum_fraction 0.702006 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,304 | file_name win_file.py | file_path ansible_ansible/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_file
version_added: "1.9.2"
short_description: Creates, touches or removes files or directories
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
- Unlike M(file), does not modify ownership, permissions or manipulate links.
- For non-Windows targets, use the M(file) module instead.
options:
path:
description:
- Path to the file being managed.
required: yes
type: path
aliases: [ dest, name ]
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
- If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior.
- If C(absent), directories will be recursively deleted, and files will be removed.
- If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
choices: [ absent, directory, file, touch ]
seealso:
- module: file
- module: win_acl
- module: win_acl_inheritance
- module: win_owner
- module: win_stat
author:
- Jon Hawkesworth (@jhawkesworth)
"""
EXAMPLES = r"""
- name: Touch a file (creates if not present, updates modification time if present)
win_file:
path: C:\Temp\foo.conf
state: touch
- name: Remove a file, if present
win_file:
path: C:\Temp\foo.conf
state: absent
- name: Create directory structure
win_file:
path: C:\Temp\folder\subfolder
state: directory
- name: Remove directory structure
win_file:
path: C:\Temp
state: absent
"""
| size 2,184 | language Python | extension .py | total_lines 63 | avg_line_length 30.444444 | max_line_length 92 | alphanum_fraction 0.692999 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,305 | file_name runas.py | file_path ansible_ansible/test/support/windows-integration/plugins/become/runas.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
become: runas
short_description: Run As user
description:
- This become plugin allows your remote/login user to execute commands as another user via the windows runas facility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
ini:
- section: privilege_escalation
key: become_user
- section: runas_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_runas_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_RUNAS_USER
required: True
become_flags:
description: Options to pass to runas, a space delimited list of k=v pairs
default: ''
ini:
- section: privilege_escalation
key: become_flags
- section: runas_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_runas_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_RUNAS_FLAGS
become_pass:
description: password
ini:
- section: runas_become_plugin
key: password
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_runas_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_RUNAS_PASS
notes:
- runas is really implemented in the powershell module handler and as such can only be used with winrm connections.
- This plugin ignores the 'become_exe' setting as it uses an API and not an executable.
- The Secondary Logon service (seclogon) must be running to use runas
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = 'runas'
def build_become_command(self, cmd, shell):
# runas is implemented inside the winrm connection plugin
return cmd
| size 2,404 | language Python | extension .py | total_lines 63 | avg_line_length 27.412698 | max_line_length 127 | alphanum_fraction 0.585011 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,306 | file_name win_reboot.py | file_path ansible_ansible/test/support/windows-integration/plugins/action/win_reboot.py |
# Copyright: (c) 2018, Matt Davis <mdavis@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from datetime import datetime, timezone
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.action import ActionBase
from ansible.plugins.action.reboot import ActionModule as RebootActionModule
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(RebootActionModule, ActionBase):
TRANSFERS_FILES = False
_VALID_ARGS = frozenset((
'connect_timeout', 'connect_timeout_sec', 'msg', 'post_reboot_delay', 'post_reboot_delay_sec', 'pre_reboot_delay', 'pre_reboot_delay_sec',
'reboot_timeout', 'reboot_timeout_sec', 'shutdown_timeout', 'shutdown_timeout_sec', 'test_command',
))
DEFAULT_BOOT_TIME_COMMAND = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime"
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_PRE_REBOOT_DELAY = 2
DEFAULT_SUDOABLE = False
DEFAULT_SHUTDOWN_COMMAND_ARGS = '/r /t {delay_sec} /c "{message}"'
DEPRECATED_ARGS = {
'shutdown_timeout': '2.5',
'shutdown_timeout_sec': '2.5',
}
def __init__(self, *args, **kwargs):
super(ActionModule, self).__init__(*args, **kwargs)
def get_distribution(self, task_vars):
return {'name': 'windows', 'version': '', 'family': ''}
def get_shutdown_command(self, task_vars, distribution):
return self.DEFAULT_SHUTDOWN_COMMAND
def run_test_command(self, distribution, **kwargs):
# Need to wrap the test_command in our PowerShell encoded wrapper. This is done to align the command input to a
# common shell and to allow the psrp connection plugin to report the correct exit code without manually setting
# $LASTEXITCODE for just that plugin.
test_command = self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND)
kwargs['test_command'] = self._connection._shell._encode_script(test_command)
super(ActionModule, self).run_test_command(distribution, **kwargs)
def perform_reboot(self, task_vars, distribution):
shutdown_command = self.get_shutdown_command(task_vars, distribution)
shutdown_command_args = self.get_shutdown_command_args(distribution)
reboot_command = self._connection._shell._encode_script('{0} {1}'.format(shutdown_command, shutdown_command_args))
display.vvv("{action}: rebooting server...".format(action=self._task.action))
display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command))
result = {}
reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
result['start'] = datetime.now(timezone.utc)
# Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
stdout = reboot_result['stdout']
stderr = reboot_result['stderr']
if reboot_result['rc'] == 1190 or (reboot_result['rc'] != 0 and "(1190)" in reboot_result['stderr']):
display.warning('A scheduled reboot was pre-empted by Ansible.')
# Try to abort (this may fail if it was already aborted)
result1 = self._low_level_execute_command(self._connection._shell._encode_script('shutdown /a'),
sudoable=self.DEFAULT_SUDOABLE)
# Initiate reboot again
result2 = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
reboot_result['rc'] = result2['rc']
stdout += result1['stdout'] + result2['stdout']
stderr += result1['stderr'] + result2['stderr']
if reboot_result['rc'] != 0:
result['failed'] = True
result['rebooted'] = False
result['msg'] = "Reboot command failed, error was: {stdout} {stderr}".format(
stdout=to_native(stdout.strip()),
stderr=to_native(stderr.strip()))
return result
result['failed'] = False
return result
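Both run_test_command and perform_reboot above pass their commands through the connection shell's script encoding. A generic sketch of that mechanism, wrapping a command as a base64-encoded UTF-16LE string for `powershell.exe -EncodedCommand`; this shows the standard PowerShell facility, not Ansible's private `_encode_script` helper.

```python
# Generic illustration of PowerShell's -EncodedCommand wrapping (not Ansible's helper).
import base64

def encode_powershell_command(script):
    # powershell.exe expects the encoded command as base64 over UTF-16LE text
    encoded = base64.b64encode(script.encode("utf-16-le")).decode("ascii")
    return "powershell.exe -NoProfile -EncodedCommand %s" % encoded

# encode_powershell_command("whoami")
```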
| size 4,343 | language Python | extension .py | total_lines 71 | avg_line_length 52.43662 | max_line_length 146 | alphanum_fraction 0.672158 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,307 | file_name win_copy.py | file_path ansible_ansible/test/support/windows-integration/plugins/action/win_copy.py |
# This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import base64
import json
import os
import os.path
import shutil
import tempfile
import traceback
import zipfile
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
def _walk_dirs(topdir, loader, decrypt=True, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False):
"""
Walk a filesystem tree returning enough information to copy the files.
This is similar to the _walk_dirs function in ``copy.py`` but returns
a dict instead of a tuple for each entry and includes the checksum of
a local file if wanted.
:arg topdir: The directory that the filesystem tree is rooted at
:arg loader: The self._loader object from ActionBase
:kwarg decrypt: Whether to decrypt a file encrypted with ansible-vault
:kwarg base_path: The initial directory structure to strip off of the
files for the destination directory. If this is None (the default),
the base_path is set to ``top_dir``.
:kwarg local_follow: Whether to follow symlinks on the source. When set
to False, no symlinks are dereferenced. When set to True (the
default), the code will dereference most symlinks. However, symlinks
can still be present if needed to break a circular link.
:kwarg trailing_slash_detector: Function to determine if a path has
a trailing directory separator. Only needed when dealing with paths on
a remote machine (in which case, pass in a function that is aware of the
directory separator conventions on the remote machine).
:kwarg checksum_check: Whether to get the checksum of the local file and add it to the dict
:returns: dictionary of dictionaries. All of the path elements in the structure are text strings.
This separates all the files, directories, and symlinks along with
important information about each::
{
'files': [{
src: '/absolute/path/to/copy/from',
dest: 'relative/path/to/copy/to',
checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43'
}, ...],
'directories': [{
src: '/absolute/path/to/copy/from',
dest: 'relative/path/to/copy/to'
}, ...],
'symlinks': [{
src: '/symlink/target/path',
dest: 'relative/path/to/copy/to'
}, ...],
}
The ``symlinks`` field is only populated if ``local_follow`` is set to False
*or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set
to None if checksum_check=False.
"""
# Convert the path segments into byte strings
r_files = {'files': [], 'directories': [], 'symlinks': []}
def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False):
"""
This is a closure (function utilizing variables from its parent
function's scope) so that we only need one copy of all the containers.
Note that this function uses side effects (See the Variables used from
outer scope).
:arg topdir: The directory we are walking for files
:arg rel_offset: Integer defining how many characters to strip off of
the beginning of a path
:arg parent_dirs: Directories that we're copying that this directory is in.
:kwarg rel_base: String to prepend to the path after ``rel_offset`` is
applied to form the relative path.
Variables used from the outer scope
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:r_files: Dictionary of files in the hierarchy. See the return value
for :func:`walk` for the structure of this dictionary.
:local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
"""
for base_path, sub_folders, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
if os.path.islink(filepath):
# Dereference the symlink
real_file = loader.get_real_file(os.path.realpath(filepath), decrypt=decrypt)
if local_follow and os.path.isfile(real_file):
# Add the file pointed to by the symlink
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
else:
# Mark this file as a symlink to copy
r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath})
else:
# Just a normal file
real_file = loader.get_real_file(filepath, decrypt=decrypt)
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
for dirname in sub_folders:
dirpath = os.path.join(base_path, dirname)
dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
real_dir = os.path.realpath(dirpath)
dir_stats = os.stat(real_dir)
if os.path.islink(dirpath):
if local_follow:
if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
# Just insert the symlink if the target directory
# exists inside of the copy already
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the dirpath to find all parent directories.
new_parents = set()
parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
for parent in range(len(parent_dir_list), 0, -1):
parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
# Reached the point at which the directory
# tree is already known. Don't add any
# more or we might go to an ancestor that
# isn't being copied.
break
new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
# This was a circular symlink. So add it as
# a symlink
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the directory pointed to by the symlink
r_files['directories'].append({"src": real_dir, "dest": dest_dirpath})
offset = len(real_dir) + 1
_recurse(real_dir, offset, parent_dirs.union(new_parents),
rel_base=dest_dirpath,
checksum_check=checksum_check)
else:
# Add the symlink to the destination
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Just a normal directory
r_files['directories'].append({"src": dirpath, "dest": dest_dirpath})
# Check if the source ends with a "/" so that we know which directory
# level to work at (similar to rsync)
source_trailing_slash = False
if trailing_slash_detector:
source_trailing_slash = trailing_slash_detector(topdir)
else:
source_trailing_slash = topdir.endswith(os.path.sep)
# Calculate the offset needed to strip the base_path to make relative
# paths
if base_path is None:
base_path = topdir
if not source_trailing_slash:
base_path = os.path.dirname(base_path)
if topdir.startswith(base_path):
offset = len(base_path)
# Make sure we're making the new paths relative
if trailing_slash_detector and not trailing_slash_detector(base_path):
offset += 1
elif not base_path.endswith(os.path.sep):
offset += 1
if os.path.islink(topdir) and not local_follow:
r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)}
return r_files
dir_stats = os.stat(topdir)
parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
# Actually walk the directory hierarchy
_recurse(topdir, offset, parents, checksum_check=checksum_check)
return r_files
def _get_local_checksum(get_checksum, local_path):
if get_checksum:
return checksum(local_path)
else:
return None
class ActionModule(ActionBase):
WIN_PATH_SEPARATOR = "\\"
def _create_content_tempfile(self, content):
""" Create a tempfile containing defined content """
fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
content = to_bytes(content)
try:
with os.fdopen(fd, 'wb') as f:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
return content_tempfile
def _create_zip_tempfile(self, files, directories):
tmpdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
zip_file_path = os.path.join(tmpdir, "win_copy.zip")
zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_STORED, True)
# encoding the file/dir name with base64 so Windows can unzip a unicode
# filename and get the right name, Windows doesn't handle unicode names
# very well
for directory in directories:
directory_path = to_bytes(directory['src'], errors='surrogate_or_strict')
archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED)
for file in files:
file_path = to_bytes(file['src'], errors='surrogate_or_strict')
archive_path = to_bytes(file['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED)
return zip_file_path
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
def _copy_single_file(self, local_file, dest, source_rel, task_vars, tmp, backup):
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
# copy the file across to the server
tmp_src = self._connection._shell.join_path(tmp, 'source')
self._transfer_file(local_file, tmp_src)
copy_args = self._task.args.copy()
copy_args.update(
dict(
dest=dest,
src=tmp_src,
_original_basename=source_rel,
_copy_mode="single",
backup=backup,
)
)
copy_args.pop('content', None)
copy_result = self._execute_module(module_name="copy",
module_args=copy_args,
task_vars=task_vars)
return copy_result
def _copy_zip_file(self, dest, files, directories, task_vars, tmp, backup):
# create local zip file containing all the files and directories that
# need to be copied to the server
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
try:
zip_file = self._create_zip_tempfile(files, directories)
except Exception as e:
module_return = dict(
changed=False,
failed=True,
msg="failed to create tmp zip file: %s" % to_text(e),
exception=traceback.format_exc()
)
return module_return
zip_path = self._loader.get_real_file(zip_file)
# send zip file to remote, file must end in .zip so
# Com Shell.Application works
tmp_src = self._connection._shell.join_path(tmp, 'source.zip')
self._transfer_file(zip_path, tmp_src)
# run the explode operation of win_copy on remote
copy_args = self._task.args.copy()
copy_args.update(
dict(
src=tmp_src,
dest=dest,
_copy_mode="explode",
backup=backup,
)
)
copy_args.pop('content', None)
module_return = self._execute_module(module_name='copy',
module_args=copy_args,
task_vars=task_vars)
shutil.rmtree(os.path.dirname(zip_path))
return module_return
def run(self, tmp=None, task_vars=None):
""" handler for file transfer operations """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
local_follow = boolean(self._task.args.get('local_follow', False), strict=False)
force = boolean(self._task.args.get('force', True), strict=False)
decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
backup = boolean(self._task.args.get('backup', False), strict=False)
result['src'] = source
result['dest'] = dest
result['failed'] = True
if (source is None and content is None) or dest is None:
result['msg'] = "src (or content) and dest are required"
elif source is not None and content is not None:
result['msg'] = "src and content are mutually exclusive"
elif content is not None and dest is not None and (
dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
result['msg'] = "dest must be a file if content is defined"
else:
del result['failed']
if result.get('failed'):
return result
# If content is defined make a temp file and write the content into it
content_tempfile = None
if content is not None:
try:
# if content comes to us as a dict it should be decoded json.
# We need to encode it back into a string and write it out
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content tmp file: %s" % to_native(err)
return result
# all actions should occur on the remote server, run win_copy module
elif remote_src:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
_copy_mode="remote",
dest=dest,
src=source,
force=force,
backup=backup,
)
)
new_module_args.pop('content', None)
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
return result
# find_needle returns a path that may not have a trailing slash on a
# directory so we need to find that out first and append at the end
else:
trailing_slash = source.endswith(os.path.sep)
try:
# find in expected paths
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
result['exception'] = traceback.format_exc()
return result
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
source = source[:-1]
else:
source = source + os.path.sep
# A list of source file tuples (full_path, relative_path) which we will try to copy to the destination
source_files = {'files': [], 'directories': [], 'symlinks': []}
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
result['operation'] = 'folder_copy'
# Get a list of the files we want to replicate on the remote side
source_files = _walk_dirs(source, self._loader, decrypt=decrypt, local_follow=local_follow,
trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
checksum_check=force)
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - win_copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)
check_dest = dest
# Source is a file, add details to source_files dict
else:
result['operation'] = 'file_copy'
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source, decrypt=decrypt)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source, to_text(e))
return result
original_basename = os.path.basename(source)
result['original_basename'] = original_basename
# check if dest ends with / or \ and append source filename to dest
if self._connection._shell.path_has_trailing_slash(dest):
check_dest = dest
filename = original_basename
result['dest'] = self._connection._shell.join_path(dest, filename)
else:
# replace \\ with / so we can use os.path to get the filename or dirname
unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
filename = os.path.basename(unix_path)
check_dest = os.path.dirname(unix_path)
file_checksum = _get_local_checksum(force, source_full)
source_files['files'].append(
dict(
src=source_full,
dest=filename,
checksum=file_checksum
)
)
result['checksum'] = file_checksum
result['size'] = os.path.getsize(to_bytes(source_full, errors='surrogate_or_strict'))
# find out the files/directories/symlinks that we need to copy to the server
query_args = self._task.args.copy()
query_args.update(
dict(
_copy_mode="query",
dest=check_dest,
force=force,
files=source_files['files'],
directories=source_files['directories'],
symlinks=source_files['symlinks'],
)
)
# src is not required for query; it will fail path validation if src has unix allowed chars
query_args.pop('src', None)
query_args.pop('content', None)
query_return = self._execute_module(module_args=query_args,
task_vars=task_vars)
if query_return.get('failed') is True:
result.update(query_return)
return result
if (len(query_return['files']) > 0 or len(query_return['directories']) > 0) and self._connection._shell.tmpdir is None:
self._connection._shell.tmpdir = self._make_tmp_path()
if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
# we only need to copy 1 file, don't mess around with zips
file_src = query_return['files'][0]['src']
file_dest = query_return['files'][0]['dest']
result.update(self._copy_single_file(file_src, dest, file_dest,
task_vars, self._connection._shell.tmpdir, backup))
if result.get('failed') is True:
result['msg'] = "failed to copy file %s: %s" % (file_src, result['msg'])
result['changed'] = True
elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
# either multiple files or directories need to be copied, compress
# to a zip and 'explode' the zip on the server
# TODO: handle symlinks
result.update(self._copy_zip_file(dest, source_files['files'],
source_files['directories'],
task_vars, self._connection._shell.tmpdir, backup))
result['changed'] = True
else:
# no operations need to occur
result['failed'] = False
result['changed'] = False
# remove the content tmp file and remote tmp file if it was created
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
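_create_zip_tempfile above base64-encodes every archive member name before zipping, and win_copy.ps1's Extract-Zip decodes it on the Windows side, treating the trailing slash that zipfile adds to directory entries (length % 4 == 1) as the directory marker. A small sketch of that round trip with a hypothetical relative path:

```python
# Round trip of the base64 member-name convention shared by win_copy.py and win_copy.ps1.
import base64

def encode_member(rel_path):
    # what _create_zip_tempfile stores as the archive name
    return base64.b64encode(rel_path.encode("utf-8")).decode("ascii")

def decode_member(archive_name):
    # mirrors Extract-Zip: a plain base64 name has len % 4 == 0; directory
    # entries carry a trailing "/" or "\", making len % 4 == 1, which is stripped
    is_dir = len(archive_name) % 4 == 1
    if is_dir:
        archive_name = archive_name[:-1]
    return base64.b64decode(archive_name).decode("utf-8"), is_dir

encoded = encode_member("subdir/unicodé.txt")         # hypothetical path
print(decode_member(encoded))                         # ('subdir/unicodé.txt', False)
print(decode_member(encode_member("subdir") + "/"))   # ('subdir', True)
```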
| size 23,466 | language Python | extension .py | total_lines 445 | avg_line_length 38.31236 | max_line_length 133 | alphanum_fraction 0.56859 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,308 | file_name win_template.py | file_path ansible_ansible/test/support/windows-integration/plugins/action/win_template.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.plugins.action import ActionBase
from ansible.plugins.action.template import ActionModule as TemplateActionModule
# Even though TemplateActionModule inherits from ActionBase, we still need to
# directly inherit from ActionBase to appease the plugin loader.
class ActionModule(TemplateActionModule, ActionBase):
DEFAULT_NEWLINE_SEQUENCE = '\r\n'
| size 1,114 | language Python | extension .py | total_lines 23 | avg_line_length 47.086957 | max_line_length 80 | alphanum_fraction 0.798528 | repo_name ansible/ansible | repo_stars 62,258 | repo_forks 23,791 | repo_open_issues 861 | repo_license GPL-3.0 | repo_extraction_date 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id 13,309 | file_name win_user_right.py | file_path ansible_ansible/test/support/windows-integration/plugins/modules/win_user_right.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r"""
---
module: win_user_right
version_added: '2.4'
short_description: Manage Windows User Rights
description:
- Add, remove or set User Rights for a group or users or groups.
- You can set user rights for both local and domain accounts.
options:
name:
description:
- The name of the User Right as shown by the C(Constant Name) value from
U(https://technet.microsoft.com/en-us/library/dd349804.aspx).
- The module will return an error if the right is invalid.
type: str
required: yes
users:
description:
- A list of users or groups to add/remove on the User Right.
- These can be in the form DOMAIN\user-group, user-group@DOMAIN.COM for
domain users/groups.
- For local users/groups it can be in the form user-group, .\user-group,
SERVERNAME\user-group where SERVERNAME is the name of the remote server.
- You can also add special local accounts like SYSTEM and others.
- Can be set to an empty list with I(action=set) to remove all accounts
from the right.
type: list
required: yes
action:
description:
- C(add) will add the users/groups to the existing right.
- C(remove) will remove the users/groups from the existing right.
- C(set) will replace the users/groups of the existing right.
type: str
default: set
choices: [ add, remove, set ]
notes:
- If the server is domain joined this module can change a right but if a GPO
governs this right then the changes won't last.
seealso:
- module: win_group
- module: win_group_membership
- module: win_user
author:
- Jordan Borean (@jborean93)
"""
EXAMPLES = r"""
---
- name: Replace the entries of Deny log on locally
win_user_right:
name: SeDenyInteractiveLogonRight
users:
- Guest
- Users
action: set
- name: Add account to Log on as a service
win_user_right:
name: SeServiceLogonRight
users:
- .\Administrator
- '{{ansible_hostname}}\local-user'
action: add
- name: Remove accounts who can create Symbolic links
win_user_right:
name: SeCreateSymbolicLinkPrivilege
users:
- SYSTEM
- Administrators
- DOMAIN\User
- group@DOMAIN.COM
action: remove
- name: Remove all accounts who cannot log on remote interactively
win_user_right:
name: SeDenyRemoteInteractiveLogonRight
users: []
"""
RETURN = r"""
added:
description: A list of accounts that were added to the right, this is empty
if no accounts were added.
returned: success
type: list
sample: ["NT AUTHORITY\\SYSTEM", "DOMAIN\\User"]
removed:
description: A list of accounts that were removed from the right, this is
empty if no accounts were removed.
returned: success
type: list
sample: ["SERVERNAME\\Administrator", "BUILTIN\\Administrators"]
"""
| 3,205 | Python | .py | 99 | 28.656566 | 92 | 0.711334 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,310 | win_reboot.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_reboot.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_reboot
short_description: Reboot a windows machine
description:
- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands.
- For non-Windows targets, use the M(reboot) module instead.
version_added: '2.1'
options:
pre_reboot_delay:
description:
- Seconds to wait before reboot. Passed as a parameter to the reboot command.
type: int
default: 2
aliases: [ pre_reboot_delay_sec ]
post_reboot_delay:
description:
- Seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully.
    - This is useful if you want to wait for something to settle despite your connection already working.
type: int
default: 0
version_added: '2.4'
aliases: [ post_reboot_delay_sec ]
shutdown_timeout:
description:
- Maximum seconds to wait for shutdown to occur.
- Increase this timeout for very slow hardware, large update applications, etc.
- This option has been removed since Ansible 2.5 as the win_reboot behavior has changed.
type: int
default: 600
aliases: [ shutdown_timeout_sec ]
reboot_timeout:
description:
- Maximum seconds to wait for machine to re-appear on the network and respond to a test command.
- This timeout is evaluated separately for both reboot verification and test command success so maximum clock time is actually twice this value.
type: int
default: 600
aliases: [ reboot_timeout_sec ]
connect_timeout:
description:
- Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again.
type: int
default: 5
aliases: [ connect_timeout_sec ]
test_command:
description:
    - Command that must succeed to determine that the machine is ready for management.
type: str
default: whoami
msg:
description:
- Message to display to users.
type: str
default: Reboot initiated by Ansible
boot_time_command:
description:
- Command to run that returns a unique string indicating the last time the system was booted.
- Setting this to a command that has different output each time it is run will cause the task to fail.
type: str
default: '(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime'
version_added: '2.10'
notes:
- If a shutdown was already scheduled on the system, C(win_reboot) will abort the scheduled shutdown and enforce its own shutdown.
- Beware that when C(win_reboot) returns, the Windows system may not have settled yet and some base services could be in limbo.
This can result in unexpected behavior. Check the examples for ways to mitigate this.
- The connection user must have the C(SeRemoteShutdownPrivilege) privilege enabled, see
U(https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/force-shutdown-from-a-remote-system)
for more information.
seealso:
- module: reboot
author:
- Matt Davis (@nitzmahone)
"""
EXAMPLES = r"""
- name: Reboot the machine with all defaults
win_reboot:
- name: Reboot a slow machine that might have lots of updates to apply
win_reboot:
reboot_timeout: 3600
# Install a Windows feature and reboot if necessary
- name: Install IIS Web-Server
win_feature:
name: Web-Server
register: iis_install
- name: Reboot when Web-Server feature requires it
win_reboot:
when: iis_install.reboot_required
# One way to ensure the system is reliable is to set WinRM to a delayed startup
- name: Ensure WinRM starts when the system has settled and is ready to work reliably
win_service:
name: WinRM
start_mode: delayed
# Additionally, you can add a delay before running the next task
- name: Reboot a machine that takes time to settle after being booted
win_reboot:
post_reboot_delay: 120
# Or you can make win_reboot validate exactly what you need to work before running the next task
- name: Validate that the netlogon service has started, before running the next task
win_reboot:
test_command: 'exit (Get-Service -Name Netlogon).Status -ne "Running"'
"""
RETURN = r"""
rebooted:
description: True if the machine was rebooted.
returned: always
type: bool
sample: true
elapsed:
description: The number of seconds that elapsed waiting for the system to be rebooted.
returned: always
type: float
sample: 23.2
"""
| 4,690 | Python | .py | 119 | 35.789916 | 148 | 0.7449 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,311 | win_acl.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_acl.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
# Copyright: (c) 2015, Trond Hindenes
# Copyright: (c) 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_acl
version_added: "2.0"
short_description: Set file/directory/registry permissions for a system user or group
description:
- Add or remove rights/permissions for a given user or group for the specified
  file, folder, registry key or AppPool identities.
options:
path:
description:
- The path to the file or directory.
type: str
required: yes
user:
description:
- User or Group to add specified rights to act on src file/folder or
registry key.
type: str
required: yes
state:
description:
- Specify whether to add C(present) or remove C(absent) the specified access rule.
type: str
choices: [ absent, present ]
default: present
type:
description:
- Specify whether to allow or deny the rights specified.
type: str
required: yes
choices: [ allow, deny ]
rights:
description:
- The rights/permissions that are to be allowed/denied for the specified
user or group for the item at C(path).
- If C(path) is a file or directory, rights can be any right under MSDN
FileSystemRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.filesystemrights.aspx).
- If C(path) is a registry key, rights can be any right under MSDN
RegistryRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.registryrights.aspx).
type: str
required: yes
inherit:
description:
- Inherit flags on the ACL rules.
- Can be specified as a comma separated list, e.g. C(ContainerInherit),
C(ObjectInherit).
- For more information on the choices see MSDN InheritanceFlags enumeration
at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.inheritanceflags.aspx).
- Defaults to C(ContainerInherit, ObjectInherit) for Directories.
type: str
choices: [ ContainerInherit, ObjectInherit ]
propagation:
description:
- Propagation flag on the ACL rules.
- For more information on the choices see MSDN PropagationFlags enumeration
at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.propagationflags.aspx).
type: str
choices: [ InheritOnly, None, NoPropagateInherit ]
default: "None"
notes:
- If adding ACL's for AppPool identities (available since 2.3), the Windows
Feature "Web-Scripting-Tools" must be enabled.
seealso:
- module: win_acl_inheritance
- module: win_file
- module: win_owner
- module: win_stat
author:
- Phil Schwartz (@schwartzmx)
- Trond Hindenes (@trondhindenes)
- Hans-Joachim Kliemeck (@h0nIg)
"""
EXAMPLES = r"""
- name: Restrict write and execute access to User Fed-Phil
win_acl:
user: Fed-Phil
path: C:\Important\Executable.exe
type: deny
rights: ExecuteFile,Write
- name: Add IIS_IUSRS allow rights
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Set registry key right
win_acl:
path: HKCU:\Bovine\Key
user: BUILTIN\Users
rights: EnumerateSubKeys
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Remove FullControl AccessRule for IIS_IUSRS
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: absent
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Deny Intern
win_acl:
path: C:\Administrator\Documents
user: Intern
rights: Read,Write,Modify,FullControl,Delete
type: deny
state: present
"""
| 4,112 | Python | .py | 124 | 29.048387 | 119 | 0.721357 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,312 | win_wait_for.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_wait_for.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r"""
---
module: win_wait_for
version_added: '2.4'
short_description: Waits for a condition before continuing
description:
- You can wait for a set amount of time C(timeout); this is the default if
  nothing is specified.
- Waiting for a port to become available is useful when services are not
  immediately available after their init scripts return, which is true of
  certain Java application servers.
- You can wait for a file to exist or not exist on the filesystem.
- This module can also be used to wait for a regex match string to be present
in a file.
- You can wait for active connections to be closed before continuing on a
local port.
options:
connect_timeout:
description:
- The maximum number of seconds to wait for a connection to happen before
closing and retrying.
type: int
default: 5
delay:
description:
- The number of seconds to wait before starting to poll.
type: int
exclude_hosts:
description:
- The list of hosts or IPs to ignore when looking for active TCP
connections when C(state=drained).
type: list
host:
description:
- A resolvable hostname or IP address to wait for.
- If C(state=drained) then it will only check for connections on the IP
specified, you can use '0.0.0.0' to use all host IPs.
type: str
default: '127.0.0.1'
path:
description:
- The path to a file on the filesystem to check.
- If C(state) is present or started then it will wait until the file
exists.
- If C(state) is absent then it will wait until the file does not exist.
type: path
port:
description:
- The port number to poll on C(host).
type: int
regex:
description:
- Can be used to match a string in a file.
- If C(state) is present or started then it will wait until the regex
matches.
- If C(state) is absent then it will wait until the regex does not match.
- Defaults to a multiline regex.
type: str
aliases: [ "search_regex", "regexp" ]
sleep:
description:
- Number of seconds to sleep between checks.
type: int
default: 1
state:
description:
- When checking a port, C(started) will ensure the port is open, C(stopped)
      will check that it is closed and C(drained) will check for active
connections.
- When checking for a file or a search string C(present) or C(started) will
ensure that the file or string is present, C(absent) will check that the
file or search string is absent or removed.
type: str
choices: [ absent, drained, present, started, stopped ]
default: started
timeout:
description:
- The maximum number of seconds to wait for.
type: int
default: 300
seealso:
- module: wait_for
- module: win_wait_for_process
author:
- Jordan Borean (@jborean93)
"""
EXAMPLES = r"""
- name: Wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds
win_wait_for:
port: 8000
delay: 10
- name: Wait 150 seconds for port 8000 of any IP to close active connections
win_wait_for:
host: 0.0.0.0
port: 8000
state: drained
timeout: 150
- name: Wait for port 8000 of any IP to close active connection, ignoring certain hosts
win_wait_for:
host: 0.0.0.0
port: 8000
state: drained
exclude_hosts: ['10.2.1.2', '10.2.1.3']
- name: Wait for file C:\temp\log.txt to exist before continuing
win_wait_for:
path: C:\temp\log.txt
- name: Wait until process complete is in the file before continuing
win_wait_for:
path: C:\temp\log.txt
regex: process complete
- name: Wait until file is removed
win_wait_for:
path: C:\temp\log.txt
state: absent
- name: Wait until port 1234 is offline but try every 10 seconds
win_wait_for:
port: 1234
state: absent
sleep: 10
"""
RETURN = r"""
wait_attempts:
description: The number of attempts to poll the file or port before module
finishes.
returned: always
type: int
sample: 1
elapsed:
description: The elapsed seconds between the start of poll and the end of the
module. This includes the delay if the option is set.
returned: always
type: float
sample: 2.1406487
"""
| 4,634 | Python | .py | 143 | 28.454545 | 102 | 0.704845 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,313 | win_lineinfile.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_lineinfile.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r"""
---
module: win_lineinfile
short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in a file only.
version_added: "2.0"
options:
path:
description:
- The path of the file to modify.
- Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted.
- Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
type: path
required: yes
aliases: [ dest, destfile, name ]
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
regex:
description:
- The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found
will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions;
see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx).
aliases: [ "regexp" ]
state:
description:
- Whether the line should be there or not.
type: str
choices: [ absent, present ]
default: present
line:
description:
- Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
- Be aware that the line is processed first on the controller and thus is dependent on yaml quoting rules. Any double quoted line
will have control characters, such as '\r\n', expanded. To print such characters literally, use single or no quotes.
type: str
backrefs:
description:
- Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
- If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter.
type: bool
default: no
insertafter:
description:
- Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
- If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
type: str
choices: [ EOF, '*regex*' ]
default: EOF
insertbefore:
description:
- Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available;
C(BOF) for inserting the line at the beginning of the file.
- If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
type: str
choices: [ BOF, '*regex*' ]
create:
description:
- Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing.
type: bool
default: no
validate:
description:
- Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
- The command is passed securely so shell features like expansion and pipes won't work.
type: str
encoding:
description:
- Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause
the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
- An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method -
see U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx).
- This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a
specific encoding, the default encoding (UTF-8, no BOM) will be used.
type: str
default: auto
newline:
description:
- Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated
line separator will be used for file output regardless of the original line separator that appears in the input file.
type: str
choices: [ unix, windows ]
default: windows
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
seealso:
- module: assemble
- module: lineinfile
author:
- Brian Lloyd (@brianlloyd)
"""
EXAMPLES = r"""
# Before Ansible 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- name: Insert path without converting \r\n
win_lineinfile:
path: c:\file.txt
line: c:\return\new
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^name='
line: 'name=JohnDoe'
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^name='
state: absent
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^127\.0\.0\.1'
line: '127.0.0.1 localhost'
- win_lineinfile:
path: C:\Temp\httpd.conf
regex: '^Listen '
insertafter: '^#Listen '
line: Listen 8080
- win_lineinfile:
path: C:\Temp\services
regex: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
- name: Create file if it doesn't exist with a specific encoding
win_lineinfile:
path: C:\Temp\utf16.txt
create: yes
encoding: utf-16
line: This is a utf-16 encoded file
- name: Add a line to a file and ensure the resulting file uses unix line separators
win_lineinfile:
path: C:\Temp\testfile.txt
line: Line added to file
newline: unix
- name: Update a line using backrefs
win_lineinfile:
path: C:\Temp\example.conf
backrefs: yes
regex: '(^name=)'
line: '$1JohnDoe'
"""
RETURN = r"""
backup:
description:
- Name of the backup file that was created.
- This is now deprecated, use C(backup_file) instead.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
"""
| 7,333 | Python | .py | 167 | 39.227545 | 152 | 0.709073 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,314 | win_regedit.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_regedit.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Adam Keech <akeech@chathamfinancial.com>
# Copyright: (c) 2015, Josh Ludwig <jludwig@chathamfinancial.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_regedit
version_added: '2.0'
short_description: Add, change, or remove registry keys and values
description:
- Add, modify or remove registry keys and values.
- More information about the windows registry from Wikipedia
U(https://en.wikipedia.org/wiki/Windows_Registry).
options:
path:
description:
- Name of the registry path.
- 'Should be in one of the following registry hives: HKCC, HKCR, HKCU,
HKLM, HKU.'
type: str
required: yes
aliases: [ key ]
name:
description:
- Name of the registry entry in the above C(path) parameters.
- If not provided, or empty then the '(Default)' property for the key will
be used.
type: str
aliases: [ entry, value ]
data:
description:
- Value of the registry entry C(name) in C(path).
- If not specified then the value for the property will be null for the
corresponding C(type).
- Binary and None data should be expressed in a yaml byte array or as comma
separated hex values.
- An easy way to generate this is to run C(regedit.exe) and use the
I(export) option to save the registry values to a file.
- In the exported file, binary value will look like C(hex:be,ef,be,ef), the
C(hex:) prefix is optional.
- DWORD and QWORD values should either be represented as a decimal number
or a hex value.
- Multistring values should be passed in as a list.
- See the examples for more details on how to format this data.
type: str
type:
description:
- The registry value data type.
type: str
choices: [ binary, dword, expandstring, multistring, string, qword ]
default: string
aliases: [ datatype ]
state:
description:
- The state of the registry entry.
type: str
choices: [ absent, present ]
default: present
delete_key:
description:
- When C(state) is 'absent' then this will delete the entire key.
- If C(no) then it will only clear out the '(Default)' property for
that key.
type: bool
default: yes
version_added: '2.4'
hive:
description:
- A path to a hive key like C:\Users\Default\NTUSER.DAT to load in the
registry.
- This hive is loaded under the HKLM:\ANSIBLE key which can then be used
in I(name) like any other path.
- This can be used to load the default user profile registry hive or any
other hive saved as a file.
- Using this function requires the user to have the C(SeRestorePrivilege)
and C(SeBackupPrivilege) privileges enabled.
type: path
version_added: '2.5'
notes:
- Check-mode C(-C/--check) and diff output C(-D/--diff) are supported, so that you can test every change against the active configuration before
applying changes.
- Beware that some registry hives (C(HKEY_USERS) in particular) do not allow creating new registry paths in the root folder.
- Since Ansible 2.4, when checking if a string registry value has changed, a case-sensitive test is used. Previously the test was case-insensitive.
seealso:
- module: win_reg_stat
- module: win_regmerge
author:
- Adam Keech (@smadam813)
- Josh Ludwig (@joshludwig)
- Jordan Borean (@jborean93)
"""
EXAMPLES = r"""
- name: Create registry path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
- name: Add or update registry path MyCompany, with entry 'hello', and containing 'world'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: world
- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 1337 as the decimal value
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: 1337
type: dword
- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 0xff2500ae as the hex value
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: 0xff2500ae
type: dword
- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in hex-string format
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
type: binary
- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in yaml format
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
type: binary
- name: Add or update registry path MyCompany, with expand string entry 'hello'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: '%appdata%\local'
type: expandstring
- name: Add or update registry path MyCompany, with multi string entry 'hello'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: ['hello', 'world']
type: multistring
- name: Disable keyboard layout hotkey for all users (changes existing)
win_regedit:
path: HKU:\.DEFAULT\Keyboard Layout\Toggle
name: Layout Hotkey
data: 3
type: dword
- name: Disable language hotkey for current users (adds new)
win_regedit:
path: HKCU:\Keyboard Layout\Toggle
name: Language Hotkey
data: 3
type: dword
- name: Remove registry path MyCompany (including all entries it contains)
win_regedit:
path: HKCU:\Software\MyCompany
state: absent
delete_key: yes
- name: Clear the existing (Default) entry at path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
state: absent
delete_key: no
- name: Remove entry 'hello' from registry path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
state: absent
- name: Change default mouse trailing settings for new users
win_regedit:
path: HKLM:\ANSIBLE\Control Panel\Mouse
name: MouseTrails
data: 10
type: str
state: present
hive: C:\Users\Default\NTUSER.dat
"""
RETURN = r"""
data_changed:
description: Whether this invocation changed the data in the registry value.
returned: success
type: bool
sample: false
data_type_changed:
description: Whether this invocation changed the datatype of the registry value.
returned: success
type: bool
sample: true
"""
| 6,698 | Python | .py | 190 | 31.126316 | 147 | 0.714396 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,315 | win_shell.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_shell.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_shell
short_description: Execute shell commands on target hosts
version_added: 2.2
description:
- The C(win_shell) module takes the command name followed by a list of space-delimited arguments.
It is similar to the M(win_command) module, but runs
the command via a shell (defaults to PowerShell) on the target host.
- For non-Windows targets, use the M(shell) module instead.
options:
free_form:
description:
- The C(win_shell) module takes a free form command to run.
- There is no parameter actually named 'free form'. See the examples!
type: str
required: yes
creates:
description:
- A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
type: path
removes:
description:
- A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
type: path
chdir:
description:
    - Set the specified path as the current working directory before executing a command.
type: path
executable:
description:
- Change the shell used to execute the command (eg, C(cmd)).
- The target shell must accept a C(/c) parameter followed by the raw command line to be executed.
type: path
stdin:
description:
- Set the stdin of the command directly to the specified value.
type: str
version_added: '2.5'
no_profile:
description:
- Do not load the user profile before running a command. This is only valid
when using PowerShell as the executable.
type: bool
default: no
version_added: '2.8'
output_encoding_override:
description:
- This option overrides the encoding of stdout/stderr output.
    - You can use this option when you need to run a command which ignores the console's codepage.
- You should only need to use this option in very rare circumstances.
- This value can be any valid encoding C(Name) based on the output of C([System.Text.Encoding]::GetEncodings()).
See U(https://docs.microsoft.com/dotnet/api/system.text.encoding.getencodings).
type: str
version_added: '2.10'
notes:
- If you want to run an executable securely and predictably, it may be
better to use the M(win_command) module instead. Best practices when writing
playbooks will follow the trend of using M(win_command) unless C(win_shell) is
explicitly required. When running ad-hoc commands, use your best judgement.
- WinRM will not return from a command execution until all child processes created have exited.
Thus, it is not possible to use C(win_shell) to spawn long-running child or background processes.
Consider creating a Windows service for managing background processes.
seealso:
- module: psexec
- module: raw
- module: script
- module: shell
- module: win_command
- module: win_psexec
author:
- Matt Davis (@nitzmahone)
"""
EXAMPLES = r"""
# Execute a command in the remote shell; stdout goes to the specified
# file on the remote.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt
# Change the working directory to C:\somedir before executing the command.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt chdir=C:\somedir
# You can also use the 'args' form to provide the options. This command
# will change the working directory to C:\somedir and will only run when
# C:\somelog.txt doesn't exist.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt
args:
chdir: C:\somedir
creates: C:\somelog.txt
# Run a command under a non-Powershell interpreter (cmd in this case)
- win_shell: echo %HOMEDIR%
args:
executable: cmd
register: homedir_out
- name: Run multi-lined shell commands
win_shell: |
$value = Test-Path -Path C:\temp
if ($value) {
Remove-Item -Path C:\temp -Force
}
New-Item -Path C:\temp -ItemType Directory
- name: Retrieve the input based on stdin
win_shell: '$string = [Console]::In.ReadToEnd(); Write-Output $string.Trim()'
args:
stdin: Input message
"""
RETURN = r"""
msg:
description: Changed.
returned: always
type: bool
sample: true
start:
description: The command execution start time.
returned: always
type: str
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time.
returned: always
type: str
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time.
returned: always
type: str
sample: '0:00:00.325771'
stdout:
description: The command standard output.
returned: always
type: str
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error.
returned: always
type: str
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task.
returned: always
type: str
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success).
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines.
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
"""
| 5,618 | Python | .py | 157 | 31.675159 | 127 | 0.71308 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,316 | win_copy.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_copy.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_copy
version_added: '1.9.2'
short_description: Copies files to remote locations on windows hosts
description:
- The C(win_copy) module copies a file on the local box to remote windows locations.
- For non-Windows targets, use the M(copy) module instead.
options:
content:
description:
- When used instead of C(src), sets the contents of a file directly to the
specified value.
- This is for simple values, for anything complex or with formatting please
switch to the M(template) module.
type: str
version_added: '2.3'
decrypt:
description:
- This option controls the autodecryption of source files using vault.
type: bool
default: yes
version_added: '2.5'
dest:
description:
- Remote absolute path where the file should be copied to.
- If C(src) is a directory, this must be a directory too.
- Use \ for path separators or \\ when in "double quotes".
- If C(dest) ends with \ then source or the contents of source will be
copied to the directory without renaming.
- If C(dest) is a nonexistent path, it will only be created if C(dest) ends
with "/" or "\", or C(src) is a directory.
- If C(src) and C(dest) are files and if the parent directory of C(dest)
doesn't exist, then the task will fail.
type: path
required: yes
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
- No backup is taken when C(remote_src=False) and multiple files are being
copied.
type: bool
default: no
version_added: '2.8'
force:
description:
- If set to C(yes), the file will only be transferred if the content
is different than destination.
- If set to C(no), the file will only be transferred if the
destination does not exist.
- If set to C(no), no checksumming of the content is performed which can
help improve performance on larger files.
type: bool
default: yes
version_added: '2.3'
local_follow:
description:
- This flag indicates that filesystem links in the source tree, if they
exist, should be followed.
type: bool
default: yes
version_added: '2.4'
remote_src:
description:
    - If C(no), it will search for src on the originating/master machine.
- If C(yes), it will go to the remote/target machine for the src.
type: bool
default: no
version_added: '2.3'
src:
description:
- Local path to a file to copy to the remote server; can be absolute or
relative.
- If path is a directory, it is copied (including the source folder name)
recursively to C(dest).
- If path is a directory and ends with "/", only the inside contents of
that directory are copied to the destination. Otherwise, if it does not
end with "/", the directory itself with all contents is copied.
- If path is a file and dest ends with "\", the file is copied to the
folder with the same filename.
- Required unless using C(content).
type: path
notes:
- Currently win_copy does not support copying symbolic links from both local to
remote and remote to remote.
- It is recommended that backslashes C(\) are used instead of C(/) when dealing
with remote paths.
- Because win_copy runs over WinRM, it is not a very efficient transfer
mechanism. If sending large files consider hosting them on a web service and
using M(win_get_url) instead.
seealso:
- module: assemble
- module: copy
- module: win_get_url
- module: win_robocopy
author:
- Jon Hawkesworth (@jhawkesworth)
- Jordan Borean (@jborean93)
"""
EXAMPLES = r"""
- name: Copy a single file
win_copy:
src: /srv/myfiles/foo.conf
dest: C:\Temp\renamed-foo.conf
- name: Copy a single file, but keep a backup
win_copy:
src: /srv/myfiles/foo.conf
dest: C:\Temp\renamed-foo.conf
backup: yes
- name: Copy a single file keeping the filename
win_copy:
src: /src/myfiles/foo.conf
dest: C:\Temp\
- name: Copy folder to C:\Temp (results in C:\Temp\temp_files)
win_copy:
src: files/temp_files
dest: C:\Temp
- name: Copy folder contents recursively
win_copy:
src: files/temp_files/
dest: C:\Temp
- name: Copy a single file where the source is on the remote host
win_copy:
src: C:\Temp\foo.txt
dest: C:\ansible\foo.txt
remote_src: yes
- name: Copy a folder recursively where the source is on the remote host
win_copy:
src: C:\Temp
dest: C:\ansible
remote_src: yes
- name: Set the contents of a file
win_copy:
content: abc123
dest: C:\Temp\foo.txt
- name: Copy a single file as another user
win_copy:
src: NuGet.config
dest: '%AppData%\NuGet\NuGet.config'
vars:
ansible_become_user: user
ansible_become_password: pass
# The tmp dir must be set when using win_copy as another user
# This ensures the become user will have permissions for the operation
# Make sure to specify a folder both the ansible_user and the become_user have access to (i.e not %TEMP% which is user specific and requires Admin)
ansible_remote_tmp: 'c:\tmp'
"""
RETURN = r"""
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
dest:
description: Destination file/path.
returned: changed
type: str
sample: C:\Temp\
src:
description: Source file used for the copy on the target machine.
returned: changed
type: str
sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
checksum:
description: SHA1 checksum of the file after running copy.
returned: success, src is a file
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
size:
description: Size of the target, after execution.
returned: changed, src is a file
type: int
sample: 1220
operation:
description: Whether a single file copy took place or a folder copy.
returned: success
type: str
sample: file_copy
original_basename:
description: Basename of the copied file.
returned: changed, src is a file
type: str
sample: foo.txt
"""
| 6,765 | Python | .py | 194 | 30.690722 | 151 | 0.705703 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,317 | win_command.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_command.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_command
short_description: Executes a command on a remote Windows node
version_added: 2.2
description:
- The C(win_command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($env:HOME) and operations
like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
module if you need these features).
- For non-Windows targets, use the M(command) module instead.
options:
free_form:
description:
- The C(win_command) module takes a free form command to run.
- There is no parameter actually named 'free form'. See the examples!
type: str
required: yes
creates:
description:
- A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
type: path
removes:
description:
- A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
type: path
chdir:
description:
- Set the specified path as the current working directory before executing a command.
type: path
stdin:
description:
- Set the stdin of the command directly to the specified value.
type: str
version_added: '2.5'
output_encoding_override:
description:
- This option overrides the encoding of stdout/stderr output.
    - You can use this option when you need to run a command which ignores the console's codepage.
- You should only need to use this option in very rare circumstances.
- This value can be any valid encoding C(Name) based on the output of C([System.Text.Encoding]::GetEncodings()).
See U(https://docs.microsoft.com/dotnet/api/system.text.encoding.getencodings).
type: str
version_added: '2.10'
notes:
- If you want to run a command through a shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(win_shell) module instead. The
C(win_command) module is much more secure as it's not affected by the user's
environment.
- C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not
exist, use this.
seealso:
- module: command
- module: psexec
- module: raw
- module: win_psexec
- module: win_shell
author:
- Matt Davis (@nitzmahone)
"""
EXAMPLES = r"""
- name: Save the result of 'whoami' in 'whoami_out'
win_command: whoami
register: whoami_out
- name: Run command that only runs if folder exists and runs from a specific folder
win_command: wbadmin -backupTarget:C:\backup\
args:
chdir: C:\somedir\
creates: C:\backup\
- name: Run an executable and send data to the stdin for the executable
win_command: powershell.exe -
args:
stdin: Write-Host test
"""
RETURN = r"""
msg:
description: changed
returned: always
type: bool
sample: true
start:
description: The command execution start time
returned: always
type: str
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: str
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: str
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: str
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: str
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: str
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
"""
| 4,500 | Python | .py | 129 | 30.666667 | 153 | 0.703483 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,318 | win_get_url.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_get_url.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Paul Durivage <paul.durivage@rackspace.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_get_url
version_added: "1.7"
short_description: Downloads file from HTTP, HTTPS, or FTP to node
description:
- Downloads files from HTTP, HTTPS, or FTP to the remote server.
- The remote server I(must) have direct access to the remote resource.
- For non-Windows targets, use the M(get_url) module instead.
options:
url:
description:
- The full URL of a file to download.
type: str
required: yes
dest:
description:
- The location to save the file at the URL.
- Be sure to include a filename and extension as appropriate.
type: path
required: yes
force:
description:
- If C(yes), will download the file every time and replace the file if the contents change. If C(no), will only
download the file if it does not exist or the remote file has been
modified more recently than the local file.
    - This works by sending an HTTP HEAD request to retrieve the last modified
time of the requested resource, so for this to work, the remote web
server must support HEAD requests.
type: bool
default: yes
version_added: "2.0"
checksum:
description:
- If a I(checksum) is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
- This option cannot be set with I(checksum_url).
type: str
version_added: "2.8"
checksum_algorithm:
description:
- Specifies the hashing algorithm used when calculating the checksum of
the remote and destination file.
type: str
choices:
- md5
- sha1
- sha256
- sha384
- sha512
default: sha1
version_added: "2.8"
checksum_url:
description:
- Specifies a URL that contains the checksum values for the resource at
I(url).
- Like C(checksum), this is used to verify the integrity of the remote
transfer.
- This option cannot be set with I(checksum).
type: str
version_added: "2.8"
url_username:
description:
- The username to use for authentication.
- The aliases I(user) and I(username) are deprecated and will be removed in
Ansible 2.14.
aliases:
- user
- username
url_password:
description:
- The password for I(url_username).
- The alias I(password) is deprecated and will be removed in Ansible 2.14.
aliases:
- password
proxy_url:
version_added: "2.0"
proxy_username:
version_added: "2.0"
proxy_password:
version_added: "2.0"
headers:
version_added: "2.4"
use_proxy:
version_added: "2.4"
follow_redirects:
version_added: "2.9"
maximum_redirection:
version_added: "2.9"
client_cert:
version_added: "2.9"
client_cert_password:
version_added: "2.9"
method:
description:
- This option is not for use with C(win_get_url) and should be ignored.
version_added: "2.9"
notes:
- If your URL includes an escaped slash character (%2F) this module will convert it to a real slash.
This is a result of the behaviour of the System.Uri class as described in
L(the documentation,https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/network/schemesettings-element-uri-settings#remarks).
- Since Ansible 2.8, the module will skip reporting a change if the remote
  checksum is the same as the local file even when C(force=yes). This is to
better align with M(get_url).
extends_documentation_fragment:
- url_windows
seealso:
- module: get_url
- module: uri
- module: win_uri
author:
- Paul Durivage (@angstwad)
- Takeshi Kuramochi (@tksarah)
"""
EXAMPLES = r"""
- name: Download earthrise.jpg to specified path
win_get_url:
url: http://www.example.com/earthrise.jpg
dest: C:\Users\RandomUser\earthrise.jpg
- name: Download earthrise.jpg to specified path only if modified
win_get_url:
url: http://www.example.com/earthrise.jpg
dest: C:\Users\RandomUser\earthrise.jpg
force: no
- name: Download earthrise.jpg to specified path through a proxy server.
win_get_url:
url: http://www.example.com/earthrise.jpg
dest: C:\Users\RandomUser\earthrise.jpg
proxy_url: http://10.0.0.1:8080
proxy_username: username
proxy_password: password
- name: Download file from FTP with authentication
win_get_url:
url: ftp://server/file.txt
dest: '%TEMP%\ftp-file.txt'
url_username: ftp-user
url_password: ftp-password
- name: Download src with sha256 checksum url
win_get_url:
url: http://www.example.com/earthrise.jpg
dest: C:\temp\earthrise.jpg
checksum_url: http://www.example.com/sha256sum.txt
checksum_algorithm: sha256
force: True
- name: Download src with sha256 checksum url
win_get_url:
url: http://www.example.com/earthrise.jpg
dest: C:\temp\earthrise.jpg
checksum: a97e6837f60cec6da4491bab387296bbcd72bdba
checksum_algorithm: sha1
force: True
"""
RETURN = r"""
dest:
description: destination file/path
returned: always
type: str
sample: C:\Users\RandomUser\earthrise.jpg
checksum_dest:
description: <algorithm> checksum of the file after the download
returned: success and dest has been downloaded
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
checksum_src:
description: <algorithm> checksum of the remote resource
returned: force=yes or dest did not exist
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
elapsed:
description: The elapsed seconds between the start of poll and the end of the module.
returned: always
type: float
sample: 2.1406487
size:
description: size of the dest file
returned: success
type: int
sample: 1220
url:
description: requested url
returned: always
type: str
sample: http://www.example.com/earthrise.jpg
msg:
description: Error message, or HTTP status message from web-server
returned: always
type: str
sample: OK
status_code:
description: HTTP status code
returned: always
type: int
sample: 200
"""
| 6,591 | Python | .py | 204 | 28.058824 | 152 | 0.7133 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,319 | win_whoami.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_whoami.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r"""
---
module: win_whoami
version_added: "2.5"
short_description: Get information about the current user and process
description:
- Designed to return the same information as the C(whoami /all) command.
- Also includes information missing from C(whoami) such as logon metadata like
logon rights, id, type.
notes:
- If running this module with a non admin user, the logon rights will be an
empty list as Administrator rights are required to query LSA for the
information.
seealso:
- module: win_credential
- module: win_group_membership
- module: win_user_right
author:
- Jordan Borean (@jborean93)
"""
EXAMPLES = r"""
- name: Get whoami information
win_whoami:
"""
RETURN = r"""
authentication_package:
description: The name of the authentication package used to authenticate the
user in the session.
returned: success
type: str
sample: Negotiate
user_flags:
description: The user flags for the logon session, see UserFlags in
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380128).
returned: success
type: str
sample: Winlogon
upn:
description: The user principal name of the current user.
returned: success
type: str
sample: Administrator@DOMAIN.COM
logon_type:
description: The logon type that identifies the logon method, see
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380129.aspx).
returned: success
type: str
sample: Network
privileges:
description: A dictionary of privileges and their state on the logon token.
returned: success
type: dict
sample: {
"SeChangeNotifyPrivileges": "enabled-by-default",
"SeRemoteShutdownPrivilege": "disabled",
"SeDebugPrivilege": "enabled"
}
label:
description: The mandatory label set to the logon session.
returned: success
type: complex
contains:
domain_name:
description: The domain name of the label SID.
returned: success
type: str
sample: Mandatory Label
sid:
description: The SID in string form.
returned: success
type: str
sample: S-1-16-12288
account_name:
description: The account name of the label SID.
returned: success
type: str
sample: High Mandatory Level
type:
description: The type of SID.
returned: success
type: str
sample: Label
impersonation_level:
description: The impersonation level of the token, only valid if
C(token_type) is C(TokenImpersonation), see
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa379572.aspx).
returned: success
type: str
sample: SecurityAnonymous
login_time:
description: The logon time in ISO 8601 format
returned: success
type: str
sample: '2017-11-27T06:24:14.3321665+10:00'
groups:
description: A list of groups and attributes that the user is a member of.
returned: success
type: list
sample: [
{
"account_name": "Domain Users",
"domain_name": "DOMAIN",
"attributes": [
"Mandatory",
"Enabled by default",
"Enabled"
],
"sid": "S-1-5-21-1654078763-769949647-2968445802-513",
"type": "Group"
},
{
"account_name": "Administrators",
"domain_name": "BUILTIN",
"attributes": [
"Mandatory",
"Enabled by default",
"Enabled",
"Owner"
],
"sid": "S-1-5-32-544",
"type": "Alias"
}
]
account:
description: The running account SID details.
returned: success
type: complex
contains:
domain_name:
description: The domain name of the account SID.
returned: success
type: str
sample: DOMAIN
sid:
description: The SID in string form.
returned: success
type: str
sample: S-1-5-21-1654078763-769949647-2968445802-500
account_name:
description: The account name of the account SID.
returned: success
type: str
sample: Administrator
type:
description: The type of SID.
returned: success
type: str
sample: User
login_domain:
description: The name of the domain used to authenticate the owner of the
session.
returned: success
type: str
sample: DOMAIN
rights:
description: A list of logon rights assigned to the logon.
returned: success and running user is a member of the local Administrators group
type: list
sample: [
"SeNetworkLogonRight",
"SeInteractiveLogonRight",
"SeBatchLogonRight",
"SeRemoteInteractiveLogonRight"
]
logon_server:
description: The name of the server used to authenticate the owner of the
logon session.
returned: success
type: str
sample: DC01
logon_id:
description: The unique identifier of the logon session.
returned: success
type: int
sample: 20470143
dns_domain_name:
description: The DNS name of the logon session, this is an empty string if
this is not set.
returned: success
type: str
sample: DOMAIN.COM
token_type:
description: The token type to indicate whether it is a primary or
impersonation token.
returned: success
type: str
sample: TokenPrimary
"""
| 5,621 | Python | .py | 197 | 23.817259 | 92 | 0.693983 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,320 | win_stat.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_stat.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_stat
version_added: "1.7"
short_description: Get information about Windows files
description:
- Returns information about a Windows file.
- For non-Windows targets, use the M(stat) module instead.
options:
path:
description:
- The full path of the file/object to get the facts of; both forward and
back slashes are accepted.
type: path
required: yes
aliases: [ dest, name ]
get_checksum:
description:
- Whether to return a checksum of the file (default sha1)
type: bool
default: yes
version_added: "2.1"
checksum_algorithm:
description:
- Algorithm to determine checksum of file.
- Will throw an error if the host is unable to use specified algorithm.
type: str
default: sha1
choices: [ md5, sha1, sha256, sha384, sha512 ]
version_added: "2.3"
follow:
description:
- Whether to follow symlinks or junction points.
- In the case of C(path) pointing to another link, then that will
be followed until no more links are found.
type: bool
default: no
version_added: "2.8"
seealso:
- module: stat
- module: win_acl
- module: win_file
- module: win_owner
author:
- Chris Church (@cchurch)
"""
EXAMPLES = r"""
- name: Obtain information about a file
win_stat:
path: C:\foo.ini
register: file_info
- name: Obtain information about a folder
win_stat:
path: C:\bar
register: folder_info
- name: Get MD5 checksum of a file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: md5
register: md5_checksum
- debug:
var: md5_checksum.stat.checksum
- name: Get SHA1 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
register: sha1_checksum
- debug:
var: sha1_checksum.stat.checksum
- name: Get SHA256 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: sha256
register: sha256_checksum
- debug:
var: sha256_checksum.stat.checksum
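# Illustrative example only (not from the upstream docs): the follow option is
# documented above; the path used here is a placeholder.
- name: Get information about a junction point without following it
  win_stat:
    path: C:\Temp\link
    follow: no
  register: link_info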
"""
RETURN = r"""
changed:
description: Whether anything was changed
returned: always
type: bool
sample: true
stat:
description: dictionary containing all the stat data
returned: success
type: complex
contains:
attributes:
description: Attributes of the file at path in raw form.
returned: success, path exists
type: str
sample: "Archive, Hidden"
checksum:
description: The checksum of a file based on checksum_algorithm specified.
      returned: success, path exists, path is a file, get_checksum == True and
        checksum_algorithm specified is supported
type: str
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
creationtime:
description: The create time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
exists:
description: If the path exists or not.
returned: success
type: bool
sample: true
extension:
description: The extension of the file at path.
returned: success, path exists, path is a file
type: str
sample: ".ps1"
filename:
description: The name of the file (without path).
returned: success, path exists, path is a file
type: str
sample: foo.ini
hlnk_targets:
description: List of other files pointing to the same file (hard links), excludes the current file.
returned: success, path exists
type: list
sample:
- C:\temp\file.txt
- C:\Windows\update.log
isarchive:
description: If the path is ready for archiving or not.
returned: success, path exists
type: bool
sample: true
isdir:
description: If the path is a directory or not.
returned: success, path exists
type: bool
sample: true
ishidden:
description: If the path is hidden or not.
returned: success, path exists
type: bool
sample: true
isjunction:
description: If the path is a junction point or not.
returned: success, path exists
type: bool
sample: true
islnk:
description: If the path is a symbolic link or not.
returned: success, path exists
type: bool
sample: true
isreadonly:
description: If the path is read only or not.
returned: success, path exists
type: bool
sample: true
isreg:
description: If the path is a regular file.
returned: success, path exists
type: bool
sample: true
isshared:
description: If the path is shared or not.
returned: success, path exists
type: bool
sample: true
lastaccesstime:
description: The last access time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
lastwritetime:
description: The last modification time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
lnk_source:
description: Target of the symlink normalized for the remote filesystem.
returned: success, path exists and the path is a symbolic link or junction point
type: str
sample: C:\temp\link
lnk_target:
description: Target of the symlink. Note that relative paths remain relative.
returned: success, path exists and the path is a symbolic link or junction point
type: str
sample: ..\link
nlink:
description: Number of links to the file (hard links).
returned: success, path exists
type: int
sample: 1
owner:
description: The owner of the file.
returned: success, path exists
type: str
sample: BUILTIN\Administrators
path:
description: The full absolute path to the file.
returned: success, path exists, file exists
type: str
sample: C:\foo.ini
sharename:
description: The name of share if folder is shared.
returned: success, path exists, file is a directory and isshared == True
type: str
sample: file-share
size:
description: The size in bytes of a file or folder.
returned: success, path exists, file is not a link
type: int
sample: 1024
"""
| 7,577 | Python | .py | 223 | 24.901345 | 111 | 0.604277 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,321 | win_copy.ps1 | ansible_ansible/test/support/windows-integration/plugins/modules/win_copy.ps1 |
#!powershell
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#Requires -Module Ansible.ModuleUtils.Legacy
#Requires -Module Ansible.ModuleUtils.Backup
$ErrorActionPreference = 'Stop'
$params = Parse-Args -arguments $args -supports_check_mode $true
$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false
# there are 4 modes to win_copy which are driven by the action plugins:
# explode: src is a zip file which needs to be extracted to dest, for use with multiple files
# query: win_copy action plugin wants to get the state of remote files to check whether it needs to send them
# remote: all copy action is happening remotely (remote_src=True)
# single: a single file has been copied, also used with template
$copy_mode = Get-AnsibleParam -obj $params -name "_copy_mode" -type "str" -default "single" -validateset "explode","query","remote","single"
# used in explode, remote and single mode
$src = Get-AnsibleParam -obj $params -name "src" -type "path" -failifempty ($copy_mode -in @("explode","process","single"))
$dest = Get-AnsibleParam -obj $params -name "dest" -type "path" -failifempty $true
$backup = Get-AnsibleParam -obj $params -name "backup" -type "bool" -default $false
# used in single mode
$original_basename = Get-AnsibleParam -obj $params -name "_original_basename" -type "str"
# used in query and remote mode
$force = Get-AnsibleParam -obj $params -name "force" -type "bool" -default $true
# used in query mode, contains the local files/directories/symlinks that are to be copied
$files = Get-AnsibleParam -obj $params -name "files" -type "list"
$directories = Get-AnsibleParam -obj $params -name "directories" -type "list"
$result = @{
changed = $false
}
if ($diff_mode) {
$result.diff = @{}
}
Function Copy-File($source, $dest) {
$diff = ""
$copy_file = $false
$source_checksum = $null
if ($force) {
$source_checksum = Get-FileChecksum -path $source
}
if (Test-Path -LiteralPath $dest -PathType Container) {
Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': dest is already a folder"
} elseif (Test-Path -LiteralPath $dest -PathType Leaf) {
if ($force) {
$target_checksum = Get-FileChecksum -path $dest
if ($source_checksum -ne $target_checksum) {
$copy_file = $true
}
}
} else {
$copy_file = $true
}
if ($copy_file) {
$file_dir = [System.IO.Path]::GetDirectoryName($dest)
# validate the parent dir is not a file and that it exists
if (Test-Path -LiteralPath $file_dir -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder"
} elseif (-not (Test-Path -LiteralPath $file_dir)) {
# directory doesn't exist, need to create
New-Item -Path $file_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
$diff += "+$file_dir\`n"
}
if ($backup) {
$result.backup_file = Backup-File -path $dest -WhatIf:$check_mode
}
if (Test-Path -LiteralPath $dest -PathType Leaf) {
Remove-Item -LiteralPath $dest -Force -Recurse -WhatIf:$check_mode | Out-Null
$diff += "-$dest`n"
}
if (-not $check_mode) {
# cannot run with -WhatIf:$check_mode; if the parent dir didn't exist and was
# created above, it would still not exist in check mode and the copy would fail
Copy-Item -LiteralPath $source -Destination $dest -Force | Out-Null
}
$diff += "+$dest`n"
$result.changed = $true
}
# ugly but to save us from running the checksum twice, let's return it for
# the main code to add it to $result
return ,@{ diff = $diff; checksum = $source_checksum }
}
Function Copy-Folder($source, $dest) {
$diff = ""
if (-not (Test-Path -LiteralPath $dest -PathType Container)) {
$parent_dir = [System.IO.Path]::GetDirectoryName($dest)
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder"
}
if (Test-Path -LiteralPath $dest -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy folder from '$source' to '$dest': dest is already a file"
}
New-Item -Path $dest -ItemType Container -WhatIf:$check_mode | Out-Null
$diff += "+$dest\`n"
$result.changed = $true
}
$child_items = Get-ChildItem -LiteralPath $source -Force
foreach ($child_item in $child_items) {
$dest_child_path = Join-Path -Path $dest -ChildPath $child_item.Name
if ($child_item.PSIsContainer) {
$diff += (Copy-Folder -source $child_item.Fullname -dest $dest_child_path)
} else {
$diff += (Copy-File -source $child_item.Fullname -dest $dest_child_path).diff
}
}
return $diff
}
Function Get-FileSize($path) {
$file = Get-Item -LiteralPath $path -Force
if ($file.PSIsContainer) {
$size = (Get-ChildItem -Literalpath $file.FullName -Recurse -Force | `
Where-Object { $_.PSObject.Properties.Name -contains 'Length' } | `
Measure-Object -Property Length -Sum).Sum
if ($null -eq $size) {
$size = 0
}
} else {
$size = $file.Length
}
$size
}
Function Extract-Zip($src, $dest) {
$archive = [System.IO.Compression.ZipFile]::Open($src, [System.IO.Compression.ZipArchiveMode]::Read, [System.Text.Encoding]::UTF8)
foreach ($entry in $archive.Entries) {
$archive_name = $entry.FullName
# FullName is a base64 string; directory entries have a trailing / or \ appended (making length % 4 == 1), so detect and strip that marker
$padding_length = $archive_name.Length % 4
if ($padding_length -eq 0) {
$is_dir = $false
$base64_name = $archive_name
} elseif ($padding_length -eq 1) {
$is_dir = $true
if ($archive_name.EndsWith("/") -or $archive_name.EndsWith("`\")) {
$base64_name = $archive_name.Substring(0, $archive_name.Length - 1)
} else {
throw "invalid base64 archive name '$archive_name'"
}
} else {
throw "invalid base64 length '$archive_name'"
}
# to handle unicode characters, the win_copy action plugin has base64 encoded the filename
$decoded_archive_name = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($base64_name))
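# illustrative example values (not from the original source): the plugin would send
# "dGVzdC50eHQ=" for "test.txt", while a directory arrives as "Zm9sZGVy/" and
# decodes to "folder" once the trailing slash marker has been stripped above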
# re-add the / to the entry full name if it was a directory
if ($is_dir) {
$decoded_archive_name = "$decoded_archive_name/"
}
$entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_name)
$entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path)
if (-not (Test-Path -LiteralPath $entry_dir)) {
New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
}
if ($is_dir -eq $false) {
if (-not $check_mode) {
[System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $entry_target_path, $true)
}
}
}
$archive.Dispose() # release the handle of the zip file
}
Function Extract-ZipLegacy($src, $dest) {
if (-not (Test-Path -LiteralPath $dest)) {
New-Item -Path $dest -ItemType Directory -WhatIf:$check_mode | Out-Null
}
$shell = New-Object -ComObject Shell.Application
$zip = $shell.NameSpace($src)
$dest_path = $shell.NameSpace($dest)
foreach ($entry in $zip.Items()) {
$is_dir = $entry.IsFolder
$encoded_archive_entry = $entry.Name
# to handle unicode characters, the win_copy action plugin has base64 encoded the filename
$decoded_archive_entry = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($encoded_archive_entry))
if ($is_dir) {
$decoded_archive_entry = "$decoded_archive_entry/"
}
$entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_entry)
$entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path)
if (-not (Test-Path -LiteralPath $entry_dir)) {
New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
}
if ($is_dir -eq $false -and (-not $check_mode)) {
# https://msdn.microsoft.com/en-us/library/windows/desktop/bb787866.aspx
# From Folder.CopyHere documentation, 1044 means:
# - 1024: do not display a user interface if an error occurs
# - 16: respond with "yes to all" for any dialog box that is displayed
# - 4: do not display a progress dialog box
$dest_path.CopyHere($entry, 1044)
# once file is extracted, we need to rename it with non base64 name
$combined_encoded_path = [System.IO.Path]::Combine($dest, $encoded_archive_entry)
Move-Item -LiteralPath $combined_encoded_path -Destination $entry_target_path -Force | Out-Null
}
}
}
if ($copy_mode -eq "query") {
# we only return a list of files/directories that need to be copied over
# the source of the local file will be the key used
$changed_files = @()
$changed_directories = @()
$changed_symlinks = @()
foreach ($file in $files) {
$filename = $file.dest
$local_checksum = $file.checksum
$filepath = Join-Path -Path $dest -ChildPath $filename
if (Test-Path -LiteralPath $filepath -PathType Leaf) {
if ($force) {
$checksum = Get-FileChecksum -path $filepath
if ($checksum -ne $local_checksum) {
$changed_files += $file
}
}
} elseif (Test-Path -LiteralPath $filepath -PathType Container) {
Fail-Json -obj $result -message "cannot copy file to dest '$filepath': object at path is already a directory"
} else {
$changed_files += $file
}
}
foreach ($directory in $directories) {
$dirname = $directory.dest
$dirpath = Join-Path -Path $dest -ChildPath $dirname
$parent_dir = [System.IO.Path]::GetDirectoryName($dirpath)
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at parent directory path is already a file"
}
if (Test-Path -LiteralPath $dirpath -PathType Leaf) {
Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at path is already a file"
} elseif (-not (Test-Path -LiteralPath $dirpath -PathType Container)) {
$changed_directories += $directory
}
}
# TODO: Handle symlinks
$result.files = $changed_files
$result.directories = $changed_directories
$result.symlinks = $changed_symlinks
} elseif ($copy_mode -eq "explode") {
# a single zip file containing the files and directories needs to be
# expanded; this will always result in a change as the calculation is done
# on the win_copy action plugin and is only run if a change needs to occur
if (-not (Test-Path -LiteralPath $src -PathType Leaf)) {
Fail-Json -obj $result -message "Cannot expand src zip file: '$src' as it does not exist"
}
# Detect if the PS zip assemblies are available or whether to use Shell
$use_legacy = $false
try {
Add-Type -AssemblyName System.IO.Compression.FileSystem | Out-Null
Add-Type -AssemblyName System.IO.Compression | Out-Null
} catch {
$use_legacy = $true
}
if ($use_legacy) {
Extract-ZipLegacy -src $src -dest $dest
} else {
Extract-Zip -src $src -dest $dest
}
$result.changed = $true
} elseif ($copy_mode -eq "remote") {
# all copy actions are happening on the remote side (windows host), need
# to copy src to dest using PS code
$result.src = $src
$result.dest = $dest
if (-not (Test-Path -LiteralPath $src)) {
Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist"
}
if (Test-Path -LiteralPath $src -PathType Container) {
# we are copying a directory or the contents of a directory
$result.operation = 'folder_copy'
if ($src.EndsWith("/") -or $src.EndsWith("`\")) {
# copying the folder's contents to dest
$diff = ""
$child_files = Get-ChildItem -LiteralPath $src -Force
foreach ($child_file in $child_files) {
$dest_child_path = Join-Path -Path $dest -ChildPath $child_file.Name
if ($child_file.PSIsContainer) {
$diff += Copy-Folder -source $child_file.FullName -dest $dest_child_path
} else {
$diff += (Copy-File -source $child_file.FullName -dest $dest_child_path).diff
}
}
} else {
# copying the folder and its contents to dest
$dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name
$result.dest = $dest
$diff = Copy-Folder -source $src -dest $dest
}
} else {
# we are just copying a single file to dest
$result.operation = 'file_copy'
$source_basename = (Get-Item -LiteralPath $src -Force).Name
$result.original_basename = $source_basename
if ($dest.EndsWith("/") -or $dest.EndsWith("`\")) {
$dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name
$result.dest = $dest
} else {
# check if the parent dir exists, this is only done if src is a
# file and dest is the path to a file (doesn't end with \ or /)
$parent_dir = Split-Path -LiteralPath $dest
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
} elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist"
}
}
$copy_result = Copy-File -source $src -dest $dest
$diff = $copy_result.diff
$result.checksum = $copy_result.checksum
}
# the file might not exist if running in check mode
if (-not $check_mode -or (Test-Path -LiteralPath $dest -PathType Leaf)) {
$result.size = Get-FileSize -path $dest
} else {
$result.size = $null
}
if ($diff_mode) {
$result.diff.prepared = $diff
}
} elseif ($copy_mode -eq "single") {
# a single file is located in src and we need to copy to dest, this will
# always result in a change as the calculation is done on the Ansible side
# before this is run. This should also never run in check mode
if (-not (Test-Path -LiteralPath $src -PathType Leaf)) {
Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist"
}
# the dest parameter is a directory, we need to append original_basename
if ($dest.EndsWith("/") -or $dest.EndsWith("`\") -or (Test-Path -LiteralPath $dest -PathType Container)) {
$remote_dest = Join-Path -Path $dest -ChildPath $original_basename
$parent_dir = Split-Path -LiteralPath $remote_dest
# when dest ends with /, we need to create the destination directories
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
} elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
New-Item -Path $parent_dir -ItemType Directory | Out-Null
}
} else {
$remote_dest = $dest
$parent_dir = Split-Path -LiteralPath $remote_dest
# check if the dest parent dirs exist, need to fail if they don't
if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
} elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist"
}
}
if ($backup) {
$result.backup_file = Backup-File -path $remote_dest -WhatIf:$check_mode
}
Copy-Item -LiteralPath $src -Destination $remote_dest -Force | Out-Null
$result.changed = $true
}
Exit-Json -obj $result
| 17,087 | Python | .py | 346 | 41.179191 | 140 | 0.627847 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,322 | win_ping.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_ping.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_ping
version_added: "1.7"
short_description: A windows version of the classic ping module
description:
- Checks management connectivity of a windows host.
- This is NOT ICMP ping, this is just a trivial test module.
- For non-Windows targets, use the M(ping) module instead.
- For Network targets, use the M(net_ping) module instead.
options:
data:
description:
- Alternate data to return instead of 'pong'.
- If this parameter is set to C(crash), the module will cause an exception.
type: str
default: pong
seealso:
- module: ping
author:
- Chris Church (@cchurch)
"""
EXAMPLES = r"""
# Test connectivity to a windows host
# ansible winserver -m win_ping
- name: Example from an Ansible Playbook
win_ping:
- name: Induce an exception to see what happens
win_ping:
data: crash
"""
RETURN = r"""
ping:
description: Value provided with the data parameter.
returned: success
type: str
sample: pong
"""
| 1,451 | Python | .py | 47 | 27.617021 | 92 | 0.702006 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,323 | win_user.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_user.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Matt Martz <matt@sivel.net>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_user
version_added: "1.7"
short_description: Manages local Windows user accounts
description:
- Manages local Windows user accounts.
- For non-Windows targets, use the M(user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
type: str
required: yes
fullname:
description:
- Full name of the user.
type: str
version_added: "1.9"
description:
description:
- Description of the user.
type: str
version_added: "1.9"
password:
description:
- Optionally set the user's password to this (plain text) value.
type: str
update_password:
description:
- C(always) will update passwords if they differ. C(on_create) will
only set the password for newly created users.
type: str
choices: [ always, on_create ]
default: always
version_added: "1.9"
password_expired:
description:
- C(yes) will require the user to change their password at next login.
- C(no) will clear the expired password flag.
type: bool
version_added: "1.9"
password_never_expires:
description:
- C(yes) will set the password to never expire.
- C(no) will allow the password to expire.
type: bool
version_added: "1.9"
user_cannot_change_password:
description:
- C(yes) will prevent the user from changing their password.
- C(no) will allow the user to change their password.
type: bool
version_added: "1.9"
account_disabled:
description:
- C(yes) will disable the user account.
- C(no) will clear the disabled flag.
type: bool
version_added: "1.9"
account_locked:
description:
- C(no) will unlock the user account if locked.
choices: [ 'no' ]
version_added: "1.9"
groups:
description:
- Adds or removes the user from this comma-separated list of groups,
depending on the value of I(groups_action).
- When I(groups_action) is C(replace) and I(groups) is set to the empty
string ('groups='), the user is removed from all groups.
version_added: "1.9"
groups_action:
description:
- If C(add), the user is added to each group in I(groups) where not
already a member.
- If C(replace), the user is added as a member of each group in
I(groups) and removed from any other groups.
- If C(remove), the user is removed from each group in I(groups).
type: str
choices: [ add, replace, remove ]
default: replace
version_added: "1.9"
state:
description:
- When C(absent), removes the user account if it exists.
- When C(present), creates or updates the user account.
- When C(query) (new in 1.9), retrieves the user account details
without making any changes.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: user
- module: win_domain_membership
- module: win_domain_user
- module: win_group
- module: win_group_membership
- module: win_user_profile
author:
- Paul Durivage (@angstwad)
- Chris Church (@cchurch)
"""
EXAMPLES = r"""
- name: Ensure user bob is present
win_user:
name: bob
password: B0bP4ssw0rd
state: present
groups:
- Users
- name: Ensure user bob is absent
win_user:
name: bob
state: absent
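# Illustrative examples only, based on the documented groups_action and state
# options; the user and group names are placeholders.
- name: Add user bob to the Backup Operators group without changing other memberships
  win_user:
    name: bob
    groups: Backup Operators
    groups_action: add
- name: Query the details of user bob without making changes
  win_user:
    name: bob
    state: query
  register: bob_info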
"""
RETURN = r"""
account_disabled:
description: Whether the user is disabled.
returned: user exists
type: bool
sample: false
account_locked:
description: Whether the user is locked.
returned: user exists
type: bool
sample: false
description:
description: The description set for the user.
returned: user exists
type: str
sample: Username for test
fullname:
description: The full name set for the user.
returned: user exists
type: str
sample: Test Username
groups:
description: A list of groups and their ADSI path the user is a member of.
returned: user exists
type: list
sample: [
{
"name": "Administrators",
"path": "WinNT://WORKGROUP/USER-PC/Administrators"
}
]
name:
description: The name of the user
returned: always
type: str
sample: username
password_expired:
description: Whether the password is expired.
returned: user exists
type: bool
sample: false
password_never_expires:
description: Whether the password is set to never expire.
returned: user exists
type: bool
sample: true
path:
description: The ADSI path for the user.
returned: user exists
type: str
sample: "WinNT://WORKGROUP/USER-PC/username"
sid:
description: The SID for the user.
returned: user exists
type: str
sample: S-1-5-21-3322259488-2828151810-3939402796-1001
user_cannot_change_password:
description: Whether the user can change their own password.
returned: user exists
type: bool
sample: false
"""
| 5,321 | Python | .py | 187 | 24.336898 | 92 | 0.692218 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,324 | win_file.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_file.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r"""
---
module: win_file
version_added: "1.9.2"
short_description: Creates, touches or removes files or directories
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
- Unlike M(file), does not modify ownership, permissions or manipulate links.
- For non-Windows targets, use the M(file) module instead.
options:
path:
description:
- Path to the file being managed.
required: yes
type: path
aliases: [ dest, name ]
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
- If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior.
- If C(absent), directories will be recursively deleted, and files will be removed.
- If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
choices: [ absent, directory, file, touch ]
seealso:
- module: file
- module: win_acl
- module: win_acl_inheritance
- module: win_owner
- module: win_stat
author:
- Jon Hawkesworth (@jhawkesworth)
"""
EXAMPLES = r"""
- name: Touch a file (creates if not present, updates modification time if present)
win_file:
path: C:\Temp\foo.conf
state: touch
- name: Remove a file, if present
win_file:
path: C:\Temp\foo.conf
state: absent
- name: Create directory structure
win_file:
path: C:\Temp\folder\subfolder
state: directory
- name: Remove directory structure
win_file:
path: C:\Temp
state: absent
"""
| 2,184 | Python | .py | 63 | 30.444444 | 92 | 0.692999 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,325 | win_certificate_store.py | ansible_ansible/test/support/windows-integration/plugins/modules/win_certificate_store.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r"""
---
module: win_certificate_store
version_added: '2.5'
short_description: Manages the certificate store
description:
- Used to import/export and remove certificates and keys from the local
certificate store.
- This module is not used to create certificates and will only manage existing
certs as a file or in the store.
- It can be used to import PEM, DER, P7B, PKCS12 (PFX) certificates and export
PEM, DER and PKCS12 certificates.
options:
state:
description:
- If C(present), will ensure that the certificate at I(path) is imported
into the certificate store specified.
- If C(absent), will ensure that the certificate specified by I(thumbprint)
or the thumbprint of the cert at I(path) is removed from the store
specified.
- If C(exported), will ensure the file at I(path) is a certificate
specified by I(thumbprint).
- When exporting a certificate, if I(path) is a directory then the module
will fail, otherwise the file will be replaced if needed.
type: str
choices: [ absent, exported, present ]
default: present
path:
description:
- The path to a certificate file.
- This is required when I(state) is C(present) or C(exported).
- When I(state) is C(absent) and I(thumbprint) is not specified, the
thumbprint is derived from the certificate at this path.
type: path
thumbprint:
description:
- The thumbprint as a hex string to either export or remove.
- See the examples for how to specify the thumbprint.
type: str
store_name:
description:
- The store name to use when importing a certificate or searching for a
certificate.
- "C(AddressBook): The X.509 certificate store for other users"
- "C(AuthRoot): The X.509 certificate store for third-party certificate authorities (CAs)"
- "C(CertificateAuthority): The X.509 certificate store for intermediate certificate authorities (CAs)"
- "C(Disallowed): The X.509 certificate store for revoked certificates"
- "C(My): The X.509 certificate store for personal certificates"
- "C(Root): The X.509 certificate store for trusted root certificate authorities (CAs)"
- "C(TrustedPeople): The X.509 certificate store for directly trusted people and resources"
- "C(TrustedPublisher): The X.509 certificate store for directly trusted publishers"
type: str
choices:
- AddressBook
- AuthRoot
- CertificateAuthority
- Disallowed
- My
- Root
- TrustedPeople
- TrustedPublisher
default: My
store_location:
description:
- The store location to use when importing a certificate or searching for a
certificate.
choices: [ CurrentUser, LocalMachine ]
default: LocalMachine
password:
description:
- The password of the pkcs12 certificate key.
- This is used when reading a pkcs12 certificate file or the password to
set when C(state=exported) and C(file_type=pkcs12).
- If the pkcs12 file has no password set or no password should be set on
the exported file, do not set this option.
type: str
key_exportable:
description:
- Whether to allow the private key to be exported.
- If C(no), then this module and other processes will only be able to export
the certificate and the private key cannot be exported.
- Used when C(state=present) only.
type: bool
default: yes
key_storage:
description:
- Specifies where Windows will store the private key when it is imported.
- When set to C(default), the default option as set by Windows is used, typically C(user).
- When set to C(machine), the key is stored in a path accessible by various
users.
- When set to C(user), the key is stored in a path only accessible by the
current user.
- Used when C(state=present) only and cannot be changed once imported.
- See U(https://msdn.microsoft.com/en-us/library/system.security.cryptography.x509certificates.x509keystorageflags.aspx)
for more details.
type: str
choices: [ default, machine, user ]
default: default
file_type:
description:
- The file type to export the certificate as when C(state=exported).
- C(der) is a binary ASN.1 encoded file.
- C(pem) is a base64 encoded file of a der file in the OpenSSL form.
- C(pkcs12) (also known as pfx) is a binary container that contains both
the certificate and private key unlike the other options.
- When C(pkcs12) is set and the private key is not exportable or accessible
by the current user, it will throw an exception.
type: str
choices: [ der, pem, pkcs12 ]
default: der
notes:
- Some actions on PKCS12 certificates and keys may fail with the error
C(the specified network password is not correct), either use CredSSP or
Kerberos with credential delegation, or use C(become) to bypass these
restrictions.
- The certificates must be located on the Windows host to be set with I(path).
- When importing a certificate for usage in IIS, it is generally required
to use the C(machine) key_storage option, as both C(default) and C(user)
will make the private key unreadable to IIS APPPOOL identities and prevent
binding the certificate to the https endpoint.
author:
- Jordan Borean (@jborean93)
"""
EXAMPLES = r"""
- name: Import a certificate
win_certificate_store:
path: C:\Temp\cert.pem
state: present
- name: Import pfx certificate that is password protected
win_certificate_store:
path: C:\Temp\cert.pfx
state: present
password: VeryStrongPasswordHere!
become: yes
become_method: runas
- name: Import pfx certificate without password and set private key as un-exportable
win_certificate_store:
path: C:\Temp\cert.pfx
state: present
key_exportable: no
# usually you don't set this here but it is for illustrative purposes
vars:
ansible_winrm_transport: credssp
- name: Remove a certificate based on file thumbprint
win_certificate_store:
path: C:\Temp\cert.pem
state: absent
- name: Remove a certificate based on thumbprint
win_certificate_store:
thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27
state: absent
- name: Remove certificate based on thumbprint in the CurrentUser/TrustedPublisher store
win_certificate_store:
thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27
state: absent
store_location: CurrentUser
store_name: TrustedPublisher
- name: Export certificate as der encoded file
win_certificate_store:
path: C:\Temp\cert.cer
state: exported
file_type: der
- name: Export certificate and key as pfx encoded file
win_certificate_store:
path: C:\Temp\cert.pfx
state: exported
file_type: pkcs12
password: AnotherStrongPass!
become: yes
become_method: runas
become_user: SYSTEM
- name: Import certificate to be used by IIS
win_certificate_store:
path: C:\Temp\cert.pfx
file_type: pkcs12
password: StrongPassword!
store_location: LocalMachine
key_storage: machine
state: present
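# Illustrative example only, based on the documented file_type and thumbprint
# options; the path and thumbprint values are placeholders.
- name: Export certificate as pem encoded file
  win_certificate_store:
    path: C:\Temp\cert.pem
    state: exported
    file_type: pem
    thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27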
"""
RETURN = r"""
thumbprints:
description: A list of certificate thumbprints that were touched by the
module.
returned: success
type: list
sample: ["BC05633694E675449136679A658281F17A191087"]
"""
| 7,590 | Python | .py | 195 | 34.65641 | 124 | 0.732593 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,326 | ios.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
import sys
import copy
from ansible_collections.ansible.netcommon.plugins.action.network import (
ActionModule as ActionNetworkModule,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
load_provider,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
ios_provider_spec,
)
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
module_name = self._task.action.split(".")[-1]
self._config_module = True if module_name == "ios_config" else False
persistent_connection = self._play_context.connection.split(".")[-1]
warnings = []
if persistent_connection == "network_cli":
provider = self._task.args.get("provider", {})
if any(provider.values()):
display.warning(
"provider is unnecessary when using network_cli and will be ignored"
)
del self._task.args["provider"]
elif self._play_context.connection == "local":
provider = load_provider(ios_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = "ansible.netcommon.network_cli"
pc.network_os = "cisco.ios.ios"
pc.remote_addr = provider["host"] or self._play_context.remote_addr
pc.port = int(provider["port"] or self._play_context.port or 22)
pc.remote_user = (
provider["username"] or self._play_context.connection_user
)
pc.password = provider["password"] or self._play_context.password
pc.private_key_file = (
provider["ssh_keyfile"] or self._play_context.private_key_file
)
pc.become = provider["authorize"] or False
if pc.become:
pc.become_method = "enable"
pc.become_pass = provider["auth_pass"]
connection = self._shared_loader_obj.connection_loader.get(
"ansible.netcommon.persistent",
pc,
sys.stdin,
task_uuid=self._task._uuid,
)
# TODO: Remove below code after ansible minimal is cut out
if connection is None:
pc.connection = "network_cli"
pc.network_os = "ios"
connection = self._shared_loader_obj.connection_loader.get(
"persistent", pc, sys.stdin, task_uuid=self._task._uuid
)
display.vvv(
"using connection plugin %s (was local)" % pc.connection,
pc.remote_addr,
)
command_timeout = (
int(provider["timeout"])
if provider["timeout"]
else connection.get_option("persistent_command_timeout")
)
connection.set_options(
direct={"persistent_command_timeout": command_timeout}
)
socket_path = connection.run()
display.vvvv("socket_path: %s" % socket_path, pc.remote_addr)
if not socket_path:
return {
"failed": True,
"msg": "unable to open shell. Please see: "
+ "https://docs.ansible.com/ansible/latest/network/user_guide/network_debug_troubleshooting.html#category-unable-to-open-shell",
}
task_vars["ansible_socket"] = socket_path
warnings.append(
[
"connection local support for this module is deprecated and will be removed in version 2.14, use connection %s"
% pc.connection
]
)
else:
return {
"failed": True,
"msg": "Connection type %s is not valid for this module"
% self._play_context.connection,
}
result = super(ActionModule, self).run(task_vars=task_vars)
if warnings:
if "warnings" in result:
result["warnings"].extend(warnings)
else:
result["warnings"] = warnings
return result
| 5,060 | Python | .py | 117 | 32.376068 | 148 | 0.595982 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,327 | ios.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py |
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
DOCUMENTATION = """
---
author: Ansible Networking Team
cliconf: ios
short_description: Use ios cliconf to run command on Cisco IOS platform
description:
- This ios plugin provides low level abstraction apis for
sending and receiving CLI commands from Cisco IOS network devices.
version_added: "2.4"
"""
import re
import time
import json
from collections.abc import Mapping
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
NetworkConfig,
dumps,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
@enable_mode
def get_config(self, source="running", flags=None, format=None):
if source not in ("running", "startup"):
raise ValueError(
"fetching configuration from %s is not supported" % source
)
if format:
raise ValueError(
"'format' value %s is not supported for get_config" % format
)
if not flags:
flags = []
if source == "running":
cmd = "show running-config "
else:
cmd = "show startup-config "
cmd += " ".join(to_list(flags))
cmd = cmd.strip()
return self.send_command(cmd)
def get_diff(
self,
candidate=None,
running=None,
diff_match="line",
diff_ignore_lines=None,
path=None,
diff_replace="line",
):
"""
Generate diff between candidate and running configuration. If the
remote host supports onbox diff capabilities (i.e. supports_onbox_diff),
the candidate and running configurations are not required to be passed as arguments.
If onbox diff capability is not supported, the candidate argument is mandatory
and the running argument is optional.
:param candidate: The configuration which is expected to be present on remote host.
:param running: The base configuration which is used to generate diff.
:param diff_match: Instructs how to match the candidate configuration with current device configuration
Valid values are 'line', 'strict', 'exact', 'none'.
'line' - commands are matched line by line
'strict' - command lines are matched with respect to position
'exact' - command lines must be an equal match
'none' - will not compare the candidate configuration with the running configuration
:param diff_ignore_lines: Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
:param path: The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
:param diff_replace: Instructs on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the replace argument is
set to I(block) then the entire command block is pushed to the device in
configuration mode if any line is not correct.
:return: Configuration diff in json format.
{
'config_diff': '',
'banner_diff': {}
}
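        Illustrative usage sketch (not part of the original source; the
        variable names are placeholders):
            diff = cliconf.get_diff(candidate=candidate_cfg, running=running_cfg,
                                    diff_match="line", diff_replace="line")
            config_commands = diff["config_diff"].splitlines()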
"""
diff = {}
device_operations = self.get_device_operations()
option_values = self.get_option_values()
if candidate is None and device_operations["supports_generate_diff"]:
raise ValueError(
"candidate configuration is required to generate diff"
)
if diff_match not in option_values["diff_match"]:
raise ValueError(
"'match' value %s is invalid, valid values are %s"
% (diff_match, ", ".join(option_values["diff_match"]))
)
if diff_replace not in option_values["diff_replace"]:
raise ValueError(
"'replace' value %s is invalid, valid values are %s"
% (diff_replace, ", ".join(option_values["diff_replace"]))
)
# prepare candidate configuration
candidate_obj = NetworkConfig(indent=1)
want_src, want_banners = self._extract_banners(candidate)
candidate_obj.load(want_src)
if running and diff_match != "none":
# running configuration
have_src, have_banners = self._extract_banners(running)
running_obj = NetworkConfig(
indent=1, contents=have_src, ignore_lines=diff_ignore_lines
)
configdiffobjs = candidate_obj.difference(
running_obj, path=path, match=diff_match, replace=diff_replace
)
else:
configdiffobjs = candidate_obj.items
have_banners = {}
diff["config_diff"] = (
dumps(configdiffobjs, "commands") if configdiffobjs else ""
)
banners = self._diff_banners(want_banners, have_banners)
diff["banner_diff"] = banners if banners else {}
return diff
@enable_mode
def edit_config(
self, candidate=None, commit=True, replace=None, comment=None
):
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(
operations, candidate, commit, replace, comment
)
results = []
requests = []
if commit:
self.send_command("configure terminal")
for line in to_list(candidate):
if not isinstance(line, Mapping):
line = {"command": line}
cmd = line["command"]
if cmd != "end" and cmd[0] != "!":
results.append(self.send_command(**line))
requests.append(cmd)
self.send_command("end")
else:
raise ValueError("check mode is not supported")
resp["request"] = requests
resp["response"] = results
return resp
def edit_macro(
self, candidate=None, commit=True, replace=None, comment=None
):
"""
ios_config:
lines: "{{ macro_lines }}"
parents: "macro name {{ macro_name }}"
after: '@'
match: line
replace: block
"""
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(
operations, candidate, commit, replace, comment
)
results = []
requests = []
if commit:
commands = ""
self.send_command("config terminal")
time.sleep(0.1)
# first item: macro command
commands += candidate.pop(0) + "\n"
multiline_delimiter = candidate.pop(-1)
for line in candidate:
commands += " " + line + "\n"
commands += multiline_delimiter + "\n"
obj = {"command": commands, "sendonly": True}
results.append(self.send_command(**obj))
requests.append(commands)
time.sleep(0.1)
self.send_command("end", sendonly=True)
time.sleep(0.1)
results.append(self.send_command("\n"))
requests.append("\n")
resp["request"] = requests
resp["response"] = results
return resp
def get(
self,
command=None,
prompt=None,
answer=None,
sendonly=False,
output=None,
newline=True,
check_all=False,
):
if not command:
raise ValueError("must provide value of command to execute")
if output:
raise ValueError(
"'output' value %s is not supported for get" % output
)
return self.send_command(
command=command,
prompt=prompt,
answer=answer,
sendonly=sendonly,
newline=newline,
check_all=check_all,
)
def get_device_info(self):
device_info = {}
device_info["network_os"] = "ios"
reply = self.get(command="show version")
data = to_text(reply, errors="surrogate_or_strict").strip()
match = re.search(r"Version (\S+)", data)
if match:
device_info["network_os_version"] = match.group(1).strip(",")
model_search_strs = [
r"^[Cc]isco (.+) \(revision",
r"^[Cc]isco (\S+).+bytes of .*memory",
]
for item in model_search_strs:
match = re.search(item, data, re.M)
if match:
version = match.group(1).split(" ")
device_info["network_os_model"] = version[0]
break
match = re.search(r"^(.+) uptime", data, re.M)
if match:
device_info["network_os_hostname"] = match.group(1)
match = re.search(r'image file is "(.+)"', data)
if match:
device_info["network_os_image"] = match.group(1)
return device_info
def get_device_operations(self):
return {
"supports_diff_replace": True,
"supports_commit": False,
"supports_rollback": False,
"supports_defaults": True,
"supports_onbox_diff": False,
"supports_commit_comment": False,
"supports_multiline_delimiter": True,
"supports_diff_match": True,
"supports_diff_ignore_lines": True,
"supports_generate_diff": True,
"supports_replace": False,
}
def get_option_values(self):
return {
"format": ["text"],
"diff_match": ["line", "strict", "exact", "none"],
"diff_replace": ["line", "block"],
"output": [],
}
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
result["rpc"] += [
"edit_banner",
"get_diff",
"run_commands",
"get_defaults_flag",
]
result["device_operations"] = self.get_device_operations()
result.update(self.get_option_values())
return json.dumps(result)
def edit_banner(
self, candidate=None, multiline_delimiter="@", commit=True
):
"""
Edit banner on remote device
:param banners: Banners to be loaded in json format
:param multiline_delimiter: Line delimiter for banner
:param commit: Boolean value that indicates if the device candidate
configuration should be pushed in the running configuration or discarded.
:param diff: Boolean flag to indicate if the configuration that is applied on the remote host should
be generated and returned in the response or not
:return: Returns response of executing the configuration command received
from remote host
"""
resp = {}
banners_obj = json.loads(candidate)
results = []
requests = []
if commit:
for key, value in iteritems(banners_obj):
key += " %s" % multiline_delimiter
self.send_command("config terminal", sendonly=True)
for cmd in [key, value, multiline_delimiter]:
obj = {"command": cmd, "sendonly": True}
results.append(self.send_command(**obj))
requests.append(cmd)
self.send_command("end", sendonly=True)
time.sleep(0.1)
results.append(self.send_command("\n"))
requests.append("\n")
resp["request"] = requests
resp["response"] = results
return resp
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {"command": cmd}
output = cmd.pop("output", None)
if output:
raise ValueError(
"'output' value %s is not supported for run_commands"
% output
)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc:
raise
out = getattr(e, "err", to_text(e))
responses.append(out)
return responses
def get_defaults_flag(self):
"""
The method identifies the filter that should be used to fetch running-configuration
with defaults.
:return: valid default filter
"""
out = self.get("show running-config ?")
out = to_text(out, errors="surrogate_then_replace")
commands = set()
for line in out.splitlines():
if line.strip():
commands.add(line.strip().split()[0])
if "all" in commands:
return "all"
else:
return "full"
def set_cli_prompt_context(self):
"""
Make sure we are in the operational cli mode
:return: None
"""
if self._connection.connected:
out = self._connection.get_prompt()
if out is None:
raise AnsibleConnectionFailure(
message=u"cli prompt is not identified from the last received"
u" response window: %s"
% self._connection._last_recv_window
)
if re.search(
r"config.*\)#",
to_text(out, errors="surrogate_then_replace").strip(),
):
self._connection.queue_message(
"vvvv", "wrong context, sending end to device"
)
self._connection.send_command("end")
def _extract_banners(self, config):
banners = {}
banner_cmds = re.findall(r"^banner (\w+)", config, re.M)
for cmd in banner_cmds:
regex = r"banner %s \^C(.+?)(?=\^C)" % cmd
match = re.search(regex, config, re.S)
if match:
key = "banner %s" % cmd
banners[key] = match.group(1).strip()
for cmd in banner_cmds:
regex = r"banner %s \^C(.+?)(?=\^C)" % cmd
match = re.search(regex, config, re.S)
if match:
config = config.replace(str(match.group(1)), "")
config = re.sub(r"banner \w+ \^C\^C", "!! banner removed", config)
return config, banners
def _diff_banners(self, want, have):
candidate = {}
for key, value in iteritems(want):
if value != have.get(key):
candidate[key] = value
return candidate
| 16,424 | Python | .py | 404 | 29.554455 | 111 | 0.572342 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,328 | ios_command.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: ios_command
author: Peter Sprygada (@privateip)
short_description: Run commands on remote devices running Cisco IOS
description:
- Sends arbitrary commands to an ios node and returns the results read from the device.
This module includes an argument that will cause the module to wait for a specific
condition before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode. Please use
M(ios_config) to configure IOS devices.
extends_documentation_fragment:
- cisco.ios.ios
notes:
- Tested against IOS 15.6
options:
commands:
description:
- List of commands to send to the remote ios device over the configured provider.
The resulting output from the command is returned. If the I(wait_for) argument
    is provided, the module does not return until the condition is satisfied or
the number of retries has expired. If a command sent to the device requires
answering a prompt, it is possible to pass a dict containing I(command), I(answer)
and I(prompt). Common answers are 'y' or "\r" (carriage return, must be double
quotes). See examples.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the command. The task will
wait for each condition to be true before moving forward. If the conditional
is not true within the configured number of retries, the task fails. See examples.
aliases:
- waitfor
match:
description:
- The I(match) argument is used in conjunction with the I(wait_for) argument to
specify the match policy. Valid values are C(all) or C(any). If the value
is set to C(all) then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be satisfied.
default: all
choices:
- any
- all
retries:
description:
- Specifies the number of retries a command should be tried before it is considered
failed. The command is run on the target device every retry and evaluated against
the I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries of the command. If
the command does not pass the specified conditions, the interval indicates how
long to wait before trying the command again.
default: 1
"""
EXAMPLES = r"""
tasks:
- name: run show version on remote devices
ios_command:
commands: show version
- name: run show version and check to see if output contains IOS
ios_command:
commands: show version
wait_for: result[0] contains IOS
- name: run multiple commands on remote nodes
ios_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
ios_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains IOS
- result[1] contains Loopback0
- name: run commands that require answering a prompt
ios_command:
commands:
- command: 'clear counters GigabitEthernet0/1'
prompt: 'Clear "show interface" counters on this interface \[confirm\]'
answer: 'y'
- command: 'clear counters GigabitEthernet0/2'
prompt: '[confirm]'
answer: "\r"
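# Illustrative example only, based on the documented match, retries and
# interval options; the interface names in the conditions are placeholders.
- name: retry a command until either condition matches
  ios_command:
    commands: show interfaces
    wait_for:
      - result[0] contains GigabitEthernet0/1
      - result[0] contains GigabitEthernet0/2
    match: any
    retries: 5
    interval: 2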
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
Conditional,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
transform_commands,
to_lines,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
run_commands,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
ios_argument_spec,
)
def parse_commands(module, warnings):
commands = transform_commands(module)
if module.check_mode:
for item in list(commands):
if not item["command"].startswith("show"):
warnings.append(
"Only show commands are supported when using check mode, not "
"executing %s" % item["command"]
)
commands.remove(item)
return commands
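# Illustrative sketch (not part of the original module): the dict form of a
# prompted command described in the DOCUMENTATION above. transform_commands()
# is expected to yield entries shaped like this; the values are taken from the
# EXAMPLES section and are placeholders, not a prescription.
_EXAMPLE_PROMPTED_COMMAND = {
    "command": "clear counters GigabitEthernet0/1",
    "prompt": 'Clear "show interface" counters on this interface \\[confirm\\]',
    "answer": "y",
}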
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type="list", required=True),
wait_for=dict(type="list", aliases=["waitfor"]),
match=dict(default="all", choices=["all", "any"]),
retries=dict(default=10, type="int"),
interval=dict(default=1, type="int"),
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec, supports_check_mode=True
)
warnings = list()
result = {"changed": False, "warnings": warnings}
commands = parse_commands(module, warnings)
wait_for = module.params["wait_for"] or list()
try:
conditionals = [Conditional(c) for c in wait_for]
except AttributeError as exc:
module.fail_json(msg=to_text(exc))
retries = module.params["retries"]
interval = module.params["interval"]
match = module.params["match"]
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == "any":
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = "One or more conditional statements have not been satisfied"
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update(
{"stdout": responses, "stdout_lines": list(to_lines(responses))}
)
module.exit_json(**result)
if __name__ == "__main__":
main()
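# Illustrative sketch (not part of the original module): the retry/interval
# control flow from main() above, reduced to plain Python. check_fns stands in
# for the Conditional objects and run_fn for run_commands(); both are
# hypothetical callables used only to make the loop easy to read in isolation.
import time as _time

def _wait_until(check_fns, run_fn, retries=10, interval=1, match="all"):
    responses = []
    while retries > 0:
        responses = run_fn()
        for check in list(check_fns):
            if check(responses):
                if match == "any":
                    check_fns = []
                    break
                check_fns.remove(check)
        if not check_fns:
            break
        _time.sleep(interval)
        retries -= 1
    # a non-empty second element mirrors the module's failed_conditions result
    return responses, check_fns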
| 7,536 | Python | .py | 200 | 32.26 | 95 | 0.692581 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,329 | ios_config.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: ios_config
author: Peter Sprygada (@privateip)
short_description: Manage Cisco IOS configuration sections
description:
- Cisco IOS configurations use a simple block indent file syntax for segmenting configuration
into sections. This module provides an implementation for working with IOS configuration
sections in a deterministic way.
extends_documentation_fragment:
- cisco.ios.ios
notes:
- Tested against IOS 15.6
- Abbreviated commands are NOT idempotent,
see L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
options:
lines:
description:
- The ordered set of commands that should be configured in the section. The commands
must be the exact same commands as found in the device running-config. Be sure
to note the configuration command syntax as some commands are automatically
modified by the device config parser.
aliases:
- commands
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy the
commands should be checked against. If the parents argument is omitted, the
commands are checked against the set of top level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration or configuration
template to load. The path to the source file can either be the full path on
the Ansible control host or a relative path from the playbook or role root directory. This
argument is mutually exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if a change needs
to be made. This allows the playbook designer the opportunity to perform configuration
commands prior to pushing any changes without affecting how the set of commands
are matched against the system.
after:
description:
- The ordered set of commands to append to the end of the command stack if a change
needs to be made. Just like with I(before) this allows the playbook designer
to append a set of commands to be executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of the set of commands
against the current device config. If match is set to I(line), commands are
matched line by line. If match is set to I(strict), command lines are matched
with respect to position. If match is set to I(exact), command lines must be
an equal match. Finally, if match is set to I(none), the module will not attempt
to compare the source configuration with the running configuration on the remote
device.
choices:
- line
- strict
- exact
- none
default: line
replace:
description:
- Instructs the module on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are pushed
to the device in configuration mode. If the replace argument is set to I(block)
then the entire command block is pushed to the device in configuration mode
if any line is not correct.
default: line
choices:
- line
- block
multiline_delimiter:
description:
- This argument is used when pushing a multiline configuration element to the
IOS device. It specifies the character to use as the delimiting character. This
only applies to the configuration action.
default: '@'
backup:
description:
- This argument will cause the module to create a full backup of the current C(running-config)
from the remote device before any changes are made. If the C(backup_options)
value is not given, the backup file is written to the C(backup) folder in the
playbook root directory or role root directory, if playbook is part of an ansible
role. If the directory does not exist, it is created.
type: bool
default: 'no'
running_config:
description:
- The module, by default, will connect to the remote device and retrieve the current
running-config to use as a base for comparing against the contents of source.
There are times when it is not desirable to have the task get the current running-config
for every task in a playbook. The I(running_config) argument allows the implementer
to pass in the configuration to use as the base config for comparison.
aliases:
- config
defaults:
description:
- This argument specifies whether or not to collect all defaults when getting
the remote device running config. When enabled, the module will get the current
config by issuing the command C(show running-config all).
type: bool
default: 'no'
save_when:
description:
- When changes are made to the device running-configuration, the changes are not
copied to non-volatile storage by default. Using this argument will change
that behavior. If the argument is set to I(always), then the running-config will
always be copied to the startup-config and the I(modified) flag will always
be set to True. If the argument is set to I(modified), then the running-config
will only be copied to the startup-config if it has changed since the last save
to startup-config. If the argument is set to I(never), the running-config will
never be copied to the startup-config. If the argument is set to I(changed),
then the running-config will only be copied to the startup-config if the task
has made a change. I(changed) was added in Ansible 2.5.
default: never
choices:
- always
- never
- modified
- changed
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument the module can
generate diffs against different sources.
- When this option is configured as I(startup), the module will return the diff
of the running-config against the startup-config.
- When this option is configured as I(intended), the module will return the diff
of the running-config against the configuration provided in the C(intended_config)
argument.
- When this option is configured as I(running), the module will return the before
and after diff of the running-config with respect to any changes made to the
device configuration.
choices:
- running
- startup
- intended
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be ignored during
the diff. This is used for lines in the configuration that are automatically
updated by the system. This argument takes a list of regular expressions or
exact line matches.
intended_config:
description:
- The C(intended_config) provides the master configuration that the node should
conform to and is used to check the final running-config against. This argument
will not modify any settings on the remote device and is strictly used to check
the compliance of the current device's configuration against it. When specifying
this argument, the task should also modify the C(diff_against) value and set
it to I(intended).
backup_options:
description:
- This is a dict object containing configurable options related to backup file
path. The value of this option is read only when C(backup) is set to I(yes);
if C(backup) is set to I(no), this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and
date in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with the directory name in which the backup
configuration file will be stored. If the directory does not exist it will be
created first, and the filename is either the value of C(filename) or the default
filename as described in the C(filename) option description. If the path value
is not given, a I(backup) directory will be created in the current working
directory and the backup configuration will be copied into C(filename) within
the I(backup) directory.
type: path
type: dict
"""
EXAMPLES = """
- name: configure top level configuration
ios_config:
lines: hostname {{ inventory_hostname }}
- name: configure interface settings
ios_config:
lines:
- description test interface
- ip address 172.31.1.1 255.255.255.0
parents: interface Ethernet1
- name: configure ip helpers on multiple interfaces
ios_config:
lines:
- ip helper-address 172.26.1.10
- ip helper-address 172.26.3.8
parents: "{{ item }}"
with_items:
- interface Ethernet1
- interface Ethernet2
- interface GigabitEthernet1
- name: configure policer in Scavenger class
ios_config:
lines:
- conform-action transmit
- exceed-action drop
parents:
- policy-map Foo
- class Scavenger
- police cir 64000
- name: load new acl into device
ios_config:
lines:
- 10 permit ip host 192.0.2.1 any log
- 20 permit ip host 192.0.2.2 any log
- 30 permit ip host 192.0.2.3 any log
- 40 permit ip host 192.0.2.4 any log
- 50 permit ip host 192.0.2.5 any log
parents: ip access-list extended test
before: no ip access-list extended test
match: exact
- name: check the running-config against master config
ios_config:
diff_against: intended
intended_config: "{{ lookup('file', 'master.cfg') }}"
- name: check the startup-config against the running-config
ios_config:
diff_against: startup
diff_ignore_lines:
- ntp clock .*
- name: save running to startup when modified
ios_config:
save_when: modified
- name: for idempotency, use full-form commands
ios_config:
lines:
# - shut
- shutdown
# parents: int gig1/0/11
parents: interface GigabitEthernet1/0/11
# Set boot image based on comparison to a group_var (version) and the version
# that is returned from the `ios_facts` module
- name: SETTING BOOT IMAGE
ios_config:
lines:
- no boot system
- boot system flash bootflash:{{new_image}}
host: "{{ inventory_hostname }}"
when: ansible_net_version != version
- name: render a Jinja2 template onto an IOS device
ios_config:
backup: yes
src: ios_template.j2
- name: configurable backup path
ios_config:
src: ios_template.j2
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/ios_config.2016-07-16@22:28:34
filename:
description: The name of the backup file
returned: when backup is yes and filename is not specified in backup options
type: str
sample: ios_config.2016-07-16@22:28:34
shortname:
description: The full path to the backup file excluding the timestamp
returned: when backup is yes and filename is not specified in backup options
type: str
sample: /playbooks/ansible/backup/ios_config
date:
description: The date extracted from the backup file name
returned: when backup is yes
type: str
sample: "2016-07-16"
time:
description: The time extracted from the backup file name
returned: when backup is yes
type: str
sample: "22:28:34"
"""
import json
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.connection import ConnectionError
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
run_commands,
get_config,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
get_defaults_flag,
get_connection,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
ios_argument_spec,
)
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
NetworkConfig,
dumps,
)
def check_args(module, warnings):
if module.params["multiline_delimiter"]:
if len(module.params["multiline_delimiter"]) != 1:
module.fail_json(
msg="multiline_delimiter value can only be a "
"single character"
)
def edit_config_or_macro(connection, commands):
# only catch the macro configuration command,
# not negated 'no' variation.
if commands[0].startswith("macro name"):
connection.edit_macro(candidate=commands)
else:
connection.edit_config(candidate=commands)
def get_candidate_config(module):
candidate = ""
if module.params["src"]:
candidate = module.params["src"]
elif module.params["lines"]:
candidate_obj = NetworkConfig(indent=1)
parents = module.params["parents"] or list()
candidate_obj.add(module.params["lines"], parents=parents)
candidate = dumps(candidate_obj, "raw")
return candidate
def get_running_config(module, current_config=None, flags=None):
running = module.params["running_config"]
if not running:
if not module.params["defaults"] and current_config:
running = current_config
else:
running = get_config(module, flags=flags)
return running
def save_config(module, result):
result["changed"] = True
if not module.check_mode:
run_commands(module, "copy running-config startup-config\r")
else:
module.warn(
"Skipping command `copy running-config startup-config` "
"due to check_mode. Configuration not copied to "
"non-volatile storage"
)
def main():
""" main entry point for module execution
"""
backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
argument_spec = dict(
src=dict(type="path"),
lines=dict(aliases=["commands"], type="list"),
parents=dict(type="list"),
before=dict(type="list"),
after=dict(type="list"),
match=dict(
default="line", choices=["line", "strict", "exact", "none"]
),
replace=dict(default="line", choices=["line", "block"]),
multiline_delimiter=dict(default="@"),
running_config=dict(aliases=["config"]),
intended_config=dict(),
defaults=dict(type="bool", default=False),
backup=dict(type="bool", default=False),
backup_options=dict(type="dict", options=backup_spec),
save_when=dict(
choices=["always", "never", "modified", "changed"], default="never"
),
diff_against=dict(choices=["startup", "intended", "running"]),
diff_ignore_lines=dict(type="list"),
)
argument_spec.update(ios_argument_spec)
mutually_exclusive = [("lines", "src"), ("parents", "src")]
required_if = [
("match", "strict", ["lines"]),
("match", "exact", ["lines"]),
("replace", "block", ["lines"]),
("diff_against", "intended", ["intended_config"]),
]
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True,
)
result = {"changed": False}
warnings = list()
check_args(module, warnings)
result["warnings"] = warnings
diff_ignore_lines = module.params["diff_ignore_lines"]
config = None
contents = None
flags = get_defaults_flag(module) if module.params["defaults"] else []
connection = get_connection(module)
if module.params["backup"] or (
module._diff and module.params["diff_against"] == "running"
):
contents = get_config(module, flags=flags)
config = NetworkConfig(indent=1, contents=contents)
if module.params["backup"]:
result["__backup__"] = contents
if any((module.params["lines"], module.params["src"])):
match = module.params["match"]
replace = module.params["replace"]
path = module.params["parents"]
candidate = get_candidate_config(module)
running = get_running_config(module, contents, flags=flags)
try:
response = connection.get_diff(
candidate=candidate,
running=running,
diff_match=match,
diff_ignore_lines=diff_ignore_lines,
path=path,
diff_replace=replace,
)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
config_diff = response["config_diff"]
banner_diff = response["banner_diff"]
if config_diff or banner_diff:
commands = config_diff.split("\n")
if module.params["before"]:
commands[:0] = module.params["before"]
if module.params["after"]:
commands.extend(module.params["after"])
result["commands"] = commands
result["updates"] = commands
result["banners"] = banner_diff
# send the configuration commands to the device and merge
# them with the current running config
if not module.check_mode:
if commands:
edit_config_or_macro(connection, commands)
if banner_diff:
connection.edit_banner(
candidate=json.dumps(banner_diff),
multiline_delimiter=module.params[
"multiline_delimiter"
],
)
result["changed"] = True
running_config = module.params["running_config"]
startup_config = None
if module.params["save_when"] == "always":
save_config(module, result)
elif module.params["save_when"] == "modified":
output = run_commands(
module, ["show running-config", "show startup-config"]
)
running_config = NetworkConfig(
indent=1, contents=output[0], ignore_lines=diff_ignore_lines
)
startup_config = NetworkConfig(
indent=1, contents=output[1], ignore_lines=diff_ignore_lines
)
if running_config.sha1 != startup_config.sha1:
save_config(module, result)
elif module.params["save_when"] == "changed" and result["changed"]:
save_config(module, result)
if module._diff:
if not running_config:
output = run_commands(module, "show running-config")
contents = output[0]
else:
contents = running_config
# recreate the object in order to process diff_ignore_lines
running_config = NetworkConfig(
indent=1, contents=contents, ignore_lines=diff_ignore_lines
)
if module.params["diff_against"] == "running":
if module.check_mode:
module.warn(
"unable to perform diff against running-config due to check mode"
)
contents = None
else:
contents = config.config_text
elif module.params["diff_against"] == "startup":
if not startup_config:
output = run_commands(module, "show startup-config")
contents = output[0]
else:
contents = startup_config.config_text
elif module.params["diff_against"] == "intended":
contents = module.params["intended_config"]
if contents is not None:
base_config = NetworkConfig(
indent=1, contents=contents, ignore_lines=diff_ignore_lines
)
if running_config.sha1 != base_config.sha1:
before, after = "", ""
if module.params["diff_against"] == "intended":
before = running_config
after = base_config
elif module.params["diff_against"] in ("startup", "running"):
before = base_config
after = running_config
result.update(
{
"changed": True,
"diff": {"before": str(before), "after": str(after)},
}
)
module.exit_json(**result)
if __name__ == "__main__":
main()
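# Illustrative sketch (not part of the original module): the "save_when: modified"
# decision from main() above, reduced to a standalone helper. The original
# compares NetworkConfig.sha1 values of the running-config and startup-config;
# hashlib over the raw text is used here purely as a stand-in.
import hashlib as _hashlib

def _needs_save(running_text, startup_text):
    def _digest(text):
        return _hashlib.sha1(text.encode("utf-8")).hexdigest()
    return _digest(running_text) != _digest(startup_text)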
| 21,963 | Python | .py | 535 | 34.085981 | 131 | 0.673844 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,330 | ios.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations
import json
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible.module_utils.connection import Connection, ConnectionError
_DEVICE_CONFIGS = {}
ios_provider_spec = {
"host": dict(),
"port": dict(type="int"),
"username": dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
"password": dict(
fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True
),
"ssh_keyfile": dict(
fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path"
),
"authorize": dict(
fallback=(env_fallback, ["ANSIBLE_NET_AUTHORIZE"]), type="bool"
),
"auth_pass": dict(
fallback=(env_fallback, ["ANSIBLE_NET_AUTH_PASS"]), no_log=True
),
"timeout": dict(type="int"),
}
ios_argument_spec = {
"provider": dict(
type="dict", options=ios_provider_spec, removed_in_version=2.14
)
}
def get_provider_argspec():
return ios_provider_spec
def get_connection(module):
if hasattr(module, "_ios_connection"):
return module._ios_connection
capabilities = get_capabilities(module)
network_api = capabilities.get("network_api")
if network_api == "cliconf":
module._ios_connection = Connection(module._socket_path)
else:
module.fail_json(msg="Invalid connection type %s" % network_api)
return module._ios_connection
def get_capabilities(module):
if hasattr(module, "_ios_capabilities"):
return module._ios_capabilities
try:
capabilities = Connection(module._socket_path).get_capabilities()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
module._ios_capabilities = json.loads(capabilities)
return module._ios_capabilities
def get_defaults_flag(module):
connection = get_connection(module)
try:
out = connection.get_defaults_flag()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
return to_text(out, errors="surrogate_then_replace").strip()
def get_config(module, flags=None):
flags = to_list(flags)
section_filter = False
if flags and "section" in flags[-1]:
section_filter = True
flag_str = " ".join(flags)
try:
return _DEVICE_CONFIGS[flag_str]
except KeyError:
connection = get_connection(module)
try:
out = connection.get_config(flags=flags)
except ConnectionError as exc:
if section_filter:
# Some ios devices don't understand `| section foo`
out = get_config(module, flags=flags[:-1])
else:
module.fail_json(
msg=to_text(exc, errors="surrogate_then_replace")
)
cfg = to_text(out, errors="surrogate_then_replace").strip()
_DEVICE_CONFIGS[flag_str] = cfg
return cfg
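# Illustrative sketch (not part of the original helpers): the per-flag caching
# that get_config() above applies via _DEVICE_CONFIGS -- results are keyed by
# the joined flag string so repeated calls with identical flags skip the device
# round trip. fetch_fn is a hypothetical stand-in for connection.get_config().
def _cached_config(cache, flags, fetch_fn):
    key = " ".join(flags or [])
    if key not in cache:
        cache[key] = fetch_fn(flags)
    return cache[key]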
def run_commands(module, commands, check_rc=True):
connection = get_connection(module)
try:
return connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def load_config(module, commands):
connection = get_connection(module)
try:
resp = connection.edit_config(commands)
return resp.get("response")
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def normalize_interface(name):
"""Return the normalized interface name
"""
if not name:
return
def _get_number(name):
digits = ""
for char in name:
if char.isdigit() or char in "/.":
digits += char
return digits
if name.lower().startswith("gi"):
if_type = "GigabitEthernet"
elif name.lower().startswith("te"):
if_type = "TenGigabitEthernet"
elif name.lower().startswith("fa"):
if_type = "FastEthernet"
elif name.lower().startswith("fo"):
if_type = "FortyGigabitEthernet"
elif name.lower().startswith("et"):
if_type = "Ethernet"
elif name.lower().startswith("vl"):
if_type = "Vlan"
elif name.lower().startswith("lo"):
if_type = "loopback"
elif name.lower().startswith("po"):
if_type = "port-channel"
elif name.lower().startswith("nv"):
if_type = "nve"
elif name.lower().startswith("twe"):
if_type = "TwentyFiveGigE"
elif name.lower().startswith("hu"):
if_type = "HundredGigE"
else:
if_type = None
number_list = name.split(" ")
if len(number_list) == 2:
if_number = number_list[-1].strip()
else:
if_number = _get_number(name)
if if_type:
proper_interface = if_type + if_number
else:
proper_interface = name
return proper_interface
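# Illustrative sketch (not part of the original helpers): expected behaviour of
# normalize_interface() above for a few common abbreviations. The sample names
# are assumptions about typical IOS interface spellings, shown as a quick
# self-check that only runs when this file is executed directly.
if __name__ == "__main__":
    assert normalize_interface("Gi0/1") == "GigabitEthernet0/1"
    assert normalize_interface("te1/0/1") == "TenGigabitEthernet1/0/1"
    assert normalize_interface("loopback 0") == "loopback0"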
| 6,672 | Python | .py | 168 | 33.910714 | 93 | 0.6816 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,331 | ios.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.utils.display import Display
display = Display()
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"[\r\n%] Bad passwords"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
re.compile(br"Command authorization failed"),
]
def on_open_shell(self):
try:
self._exec_cli_command(b"terminal length 0")
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure("unable to set terminal parameters")
try:
self._exec_cli_command(b"terminal width 512")
try:
self._exec_cli_command(b"terminal width 0")
except AnsibleConnectionFailure:
pass
except AnsibleConnectionFailure:
display.display(
"WARNING: Unable to set terminal width, command responses may be truncated"
)
def on_become(self, passwd=None):
if self._get_prompt().endswith(b"#"):
return
cmd = {u"command": u"enable"}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u"prompt"] = to_text(
r"[\r\n]?(?:.*)?[Pp]assword: ?$", errors="surrogate_or_strict"
)
cmd[u"answer"] = passwd
cmd[u"prompt_retry_check"] = True
try:
self._exec_cli_command(
to_bytes(json.dumps(cmd), errors="surrogate_or_strict")
)
prompt = self._get_prompt()
if prompt is None or not prompt.endswith(b"#"):
raise AnsibleConnectionFailure(
"failed to elevate privilege to enable mode still at prompt [%s]"
% prompt
)
except AnsibleConnectionFailure as e:
prompt = self._get_prompt()
raise AnsibleConnectionFailure(
"unable to elevate privilege to enable mode, at prompt [%s] with error: %s"
% (prompt, e.message)
)
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b"(config" in prompt:
self._exec_cli_command(b"end")
self._exec_cli_command(b"disable")
elif prompt.endswith(b"#"):
self._exec_cli_command(b"disable")
| 4,096 | Python | .py | 99 | 32.666667 | 91 | 0.600452 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,332 | ios.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""options:
provider:
description:
- B(Deprecated)
- 'Starting with Ansible 2.5 we recommend using C(connection: network_cli).'
- For more information please see the L(IOS Platform Options guide, ../network/user_guide/platform_ios.html).
- HORIZONTALLINE
- A dict object containing connection details.
type: dict
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote device
over the specified transport. The value of host is used as the destination
address for the transport.
type: str
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device.
type: int
default: 22
username:
description:
- Configures the username to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
will be used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD)
will be used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is exceeded before
the operation is completed, the module will error.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote
device. This value is the path to the key used to authenticate the SSH
session. If the value is not specified in the task, the value of environment
variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
type: path
authorize:
description:
- Instructs the module to enter privileged mode on the remote device before
sending any commands. If not specified, the device will attempt to execute
all commands in non-privileged mode. If the value is not specified in the
task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be
used instead.
type: bool
default: false
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode on the
remote device. If I(authorize) is false, then this argument does nothing.
If the value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTH_PASS) will be used instead.
type: str
notes:
- For more information on using Ansible to manage network devices see the :ref:`Ansible
Network Guide <network_guide>`
- For more information on using Ansible to manage Cisco devices see the `Cisco integration
page <https://www.ansible.com/integrations/networks/cisco>`_.
"""
| 3,611 | Python | .py | 78 | 38.602564 | 113 | 0.689147 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,333 | connection_base.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/plugin_utils/connection_base.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017, Peter Sprygada <psprygad@redhat.com>
# (c) 2017 Ansible Project
from __future__ import annotations
import os
from ansible import constants as C
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.loader import connection_loader
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
display = Display()
__all__ = ["NetworkConnectionBase"]
BUFSIZE = 65536
class NetworkConnectionBase(ConnectionBase):
"""
A base class for network-style connections.
"""
force_persistence = True
# Do not use _remote_is_local in other connections
_remote_is_local = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(NetworkConnectionBase, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._messages = []
self._conn_closed = False
self._network_os = self._play_context.network_os
self._local = connection_loader.get("local", play_context, "/dev/null")
self._local.set_options()
self._sub_plugin = {}
self._cached_variables = (None, None, None)
# reconstruct the socket_path and set instance values accordingly
self._ansible_playbook_pid = kwargs.get("ansible_playbook_pid")
self._update_connection_state()
def __getattr__(self, name):
try:
return self.__dict__[name]
except KeyError:
if not name.startswith("_"):
plugin = self._sub_plugin.get("obj")
if plugin:
method = getattr(plugin, name, None)
if method is not None:
return method
raise AttributeError(
"'%s' object has no attribute '%s'"
% (self.__class__.__name__, name)
)
def exec_command(self, cmd, in_data=None, sudoable=True):
return self._local.exec_command(cmd, in_data, sudoable)
def queue_message(self, level, message):
"""
Adds a message to the queue of messages waiting to be pushed back to the controller process.
:arg level: A string which can either be the name of a method in display, or 'log'. When
the messages are returned to task_executor, a value of log will correspond to
``display.display(message, log_only=True)``, while another value will call ``display.[level](message)``
"""
self._messages.append((level, message))
def pop_messages(self):
messages, self._messages = self._messages, []
return messages
def put_file(self, in_path, out_path):
"""Transfer a file from local to remote"""
return self._local.put_file(in_path, out_path)
def fetch_file(self, in_path, out_path):
"""Fetch a file from remote to local"""
return self._local.fetch_file(in_path, out_path)
def reset(self):
"""
Reset the connection
"""
if self._socket_path:
self.queue_message(
"vvvv",
"resetting persistent connection for socket_path %s"
% self._socket_path,
)
self.close()
self.queue_message("vvvv", "reset call on connection instance")
def close(self):
self._conn_closed = True
if self._connected:
self._connected = False
def get_options(self, hostvars=None):
options = super(NetworkConnectionBase, self).get_options(
hostvars=hostvars
)
if (
self._sub_plugin.get("obj")
and self._sub_plugin.get("type") != "external"
):
try:
options.update(
self._sub_plugin["obj"].get_options(hostvars=hostvars)
)
except AttributeError:
pass
return options
def set_options(self, task_keys=None, var_options=None, direct=None):
super(NetworkConnectionBase, self).set_options(
task_keys=task_keys, var_options=var_options, direct=direct
)
if self.get_option("persistent_log_messages"):
warning = (
"Persistent connection logging is enabled for %s. This will log ALL interactions"
% self._play_context.remote_addr
)
logpath = getattr(C, "DEFAULT_LOG_PATH")
if logpath is not None:
warning += " to %s" % logpath
self.queue_message(
"warning",
"%s and WILL NOT redact sensitive configuration like passwords. USE WITH CAUTION!"
% warning,
)
if (
self._sub_plugin.get("obj")
and self._sub_plugin.get("type") != "external"
):
try:
self._sub_plugin["obj"].set_options(
task_keys=task_keys, var_options=var_options, direct=direct
)
except AttributeError:
pass
def _update_connection_state(self):
"""
Reconstruct the connection socket_path and check if it exists
If the socket path exists then the connection is active and set
both the _socket_path value to the path and the _connected value
to True. If the socket path doesn't exist, leave the socket path
value to None and the _connected value to False
"""
ssh = connection_loader.get("ssh", class_only=True)
control_path = ssh._create_control_path(
self._play_context.remote_addr,
self._play_context.port,
self._play_context.remote_user,
self._play_context.connection,
self._ansible_playbook_pid,
)
tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
socket_path = unfrackpath(control_path % dict(directory=tmp_path))
if os.path.exists(socket_path):
self._connected = True
self._socket_path = socket_path
def _log_messages(self, message):
if self.get_option("persistent_log_messages"):
self.queue_message("log", message)
| 6,291 | Python | .py | 150 | 31.646667 | 115 | 0.597675 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,334 | cli_config.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py |
#
# Copyright 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from ansible_collections.ansible.netcommon.plugins.action.network import (
ActionModule as ActionNetworkModule,
)
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True
if self._play_context.connection.split(".")[-1] != "network_cli":
return {
"failed": True,
"msg": "Connection type %s is not valid for cli_config module"
% self._play_context.connection,
}
return super(ActionModule, self).run(task_vars=task_vars)
| 1,351 | Python | .py | 33 | 36.363636 | 78 | 0.711128 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,335 | network.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py |
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
import os
import time
import re
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.utils.display import Display
display = Display()
PRIVATE_KEYS_RE = re.compile("__.+__")
class ActionModule(_ActionModule):
def run(self, task_vars=None):
config_module = hasattr(self, "_config_module") and self._config_module
if config_module and self._task.args.get("src"):
try:
self._handle_src_option()
except AnsibleError as e:
return {"failed": True, "msg": e.message, "changed": False}
result = super(ActionModule, self).run(task_vars=task_vars)
if (
config_module
and self._task.args.get("backup")
and not result.get("failed")
):
self._handle_backup_option(result, task_vars)
return result
def _handle_backup_option(self, result, task_vars):
filename = None
backup_path = None
try:
content = result["__backup__"]
except KeyError:
raise AnsibleError("Failed while reading configuration backup")
backup_options = self._task.args.get("backup_options")
if backup_options:
filename = backup_options.get("filename")
backup_path = backup_options.get("dir_path")
if not backup_path:
cwd = self._get_working_path()
backup_path = os.path.join(cwd, "backup")
if not filename:
tstamp = time.strftime(
"%Y-%m-%d@%H:%M:%S", time.localtime(time.time())
)
filename = "%s_config.%s" % (
task_vars["inventory_hostname"],
tstamp,
)
dest = os.path.join(backup_path, filename)
backup_path = os.path.expanduser(
os.path.expandvars(
to_bytes(backup_path, errors="surrogate_or_strict")
)
)
if not os.path.exists(backup_path):
os.makedirs(backup_path)
new_task = self._task.copy()
for item in self._task.args:
if not item.startswith("_"):
new_task.args.pop(item, None)
new_task.args.update(dict(content=content, dest=dest))
copy_action = self._shared_loader_obj.action_loader.get(
"copy",
task=new_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj,
)
copy_result = copy_action.run(task_vars=task_vars)
if copy_result.get("failed"):
result["failed"] = copy_result["failed"]
result["msg"] = copy_result.get("msg")
return
result["backup_path"] = dest
if copy_result.get("changed", False):
result["changed"] = copy_result["changed"]
if backup_options and backup_options.get("filename"):
result["date"] = time.strftime(
"%Y-%m-%d",
time.gmtime(os.stat(result["backup_path"]).st_ctime),
)
result["time"] = time.strftime(
"%H:%M:%S",
time.gmtime(os.stat(result["backup_path"]).st_ctime),
)
else:
result["date"] = tstamp.split("@")[0]
result["time"] = tstamp.split("@")[1]
result["shortname"] = result["backup_path"][::-1].split(".", 1)[1][
::-1
]
result["filename"] = result["backup_path"].split("/")[-1]
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _handle_src_option(self, convert_data=True):
src = self._task.args.get("src")
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(
working_path, "templates", src
)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise AnsibleError("path specified in src not found")
try:
with open(source, "r") as f:
template_data = to_text(f.read())
except IOError as e:
raise AnsibleError(
"unable to load src file {0}, I/O error({1}): {2}".format(
source, e.errno, e.strerror
)
)
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
with self._templar.set_temporary_context(searchpath=searchpath):
self._task.args["src"] = self._templar.template(
template_data, convert_data=convert_data
)
def _get_network_os(self, task_vars):
if "network_os" in self._task.args and self._task.args["network_os"]:
display.vvvv("Getting network OS from task argument")
network_os = self._task.args["network_os"]
elif self._play_context.network_os:
display.vvvv("Getting network OS from inventory")
network_os = self._play_context.network_os
elif (
"network_os" in task_vars.get("ansible_facts", {})
and task_vars["ansible_facts"]["network_os"]
):
display.vvvv("Getting network OS from fact")
network_os = task_vars["ansible_facts"]["network_os"]
else:
raise AnsibleError(
"ansible_network_os must be specified on this host"
)
return network_os
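# Illustrative sketch (not part of the original plugin): how the default backup
# destination is assembled in _handle_backup_option() above when no
# backup_options are supplied. Standalone and runnable; "router1" is a
# placeholder inventory hostname.
import os as _os
import time as _time

def _default_backup_dest(working_path, inventory_hostname):
    backup_path = _os.path.join(working_path, "backup")
    tstamp = _time.strftime("%Y-%m-%d@%H:%M:%S", _time.localtime(_time.time()))
    filename = "%s_config.%s" % (inventory_hostname, tstamp)
    return _os.path.join(backup_path, filename)

# e.g. _default_backup_dest("/playbooks/ansible", "router1")
#   -> "/playbooks/ansible/backup/router1_config.<date>@<time>"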
| 7,429 | Python | .py | 178 | 31.376404 | 79 | 0.583576 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,336 | net_get.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py |
# (c) 2018, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import re
import uuid
import hashlib
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.plugins.action import ActionBase
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
socket_path = None
self._get_network_os(task_vars)
persistent_connection = self._play_context.connection.split(".")[-1]
result = super(ActionModule, self).run(task_vars=task_vars)
if persistent_connection != "network_cli":
# It is supported only with network_cli
result["failed"] = True
result["msg"] = (
"connection type %s is not valid for net_get module,"
" please use fully qualified name of network_cli connection type"
% self._play_context.connection
)
return result
try:
src = self._task.args["src"]
except KeyError as exc:
return {
"failed": True,
"msg": "missing required argument: %s" % exc,
}
# Get destination file if specified
dest = self._task.args.get("dest")
if dest is None:
dest = self._get_default_dest(src)
else:
dest = self._handle_dest_path(dest)
# Get proto
proto = self._task.args.get("protocol")
if proto is None:
proto = "scp"
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
sock_timeout = conn.get_option("persistent_command_timeout")
try:
changed = self._handle_existing_file(
conn, src, dest, proto, sock_timeout
)
if changed is False:
result["changed"] = changed
result["destination"] = dest
return result
except Exception as exc:
result["msg"] = (
"Warning: %s idempotency check failed. Check dest" % exc
)
try:
conn.get_file(
source=src, destination=dest, proto=proto, timeout=sock_timeout
)
except Exception as exc:
result["failed"] = True
result["msg"] = "Exception received: %s" % exc
result["changed"] = changed
result["destination"] = dest
return result
def _handle_dest_path(self, dest):
working_path = self._get_working_path()
if os.path.isabs(dest) or urlsplit(dest).scheme:
dst = dest
else:
dst = self._loader.path_dwim_relative(working_path, "", dest)
return dst
def _get_src_filename_from_path(self, src_path):
filename_list = re.split("/|:", src_path)
return filename_list[-1]
def _get_default_dest(self, src_path):
dest_path = self._get_working_path()
src_fname = self._get_src_filename_from_path(src_path)
filename = "%s/%s" % (dest_path, src_fname)
return filename
def _handle_existing_file(self, conn, source, dest, proto, timeout):
"""
Determines whether the source and destination file match.
:return: False if source and dest both exist and have matching sha1 sums, True otherwise.
"""
if not os.path.exists(dest):
return True
cwd = self._loader.get_basedir()
filename = str(uuid.uuid4())
tmp_dest_file = os.path.join(cwd, filename)
try:
conn.get_file(
source=source,
destination=tmp_dest_file,
proto=proto,
timeout=timeout,
)
except ConnectionError as exc:
error = to_text(exc)
if error.endswith("No such file or directory"):
if os.path.exists(tmp_dest_file):
os.remove(tmp_dest_file)
return True
try:
with open(tmp_dest_file, "r") as f:
new_content = f.read()
with open(dest, "r") as f:
old_content = f.read()
except (IOError, OSError):
os.remove(tmp_dest_file)
raise
sha1 = hashlib.sha1()
old_content_b = to_bytes(old_content, errors="surrogate_or_strict")
sha1.update(old_content_b)
checksum_old = sha1.digest()
sha1 = hashlib.sha1()
new_content_b = to_bytes(new_content, errors="surrogate_or_strict")
sha1.update(new_content_b)
checksum_new = sha1.digest()
os.remove(tmp_dest_file)
if checksum_old == checksum_new:
return False
return True
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _get_network_os(self, task_vars):
if "network_os" in self._task.args and self._task.args["network_os"]:
display.vvvv("Getting network OS from task argument")
network_os = self._task.args["network_os"]
elif self._play_context.network_os:
display.vvvv("Getting network OS from inventory")
network_os = self._play_context.network_os
elif (
"network_os" in task_vars.get("ansible_facts", {})
and task_vars["ansible_facts"]["network_os"]
):
display.vvvv("Getting network OS from fact")
network_os = task_vars["ansible_facts"]["network_os"]
else:
raise AnsibleError(
"ansible_network_os must be specified on this host"
)
return network_os
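# Illustrative sketch (not part of the original plugin): the sha1 comparison at
# the heart of _handle_existing_file() above, reduced to a pure function over
# two byte strings so the idempotency check is easy to see in isolation.
import hashlib as _hashlib

def _contents_match(old_content_b, new_content_b):
    return (
        _hashlib.sha1(old_content_b).digest()
        == _hashlib.sha1(new_content_b).digest()
    )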
| 6,673 | Python | .py | 167 | 30.245509 | 97 | 0.59861 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,337 | net_put.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py |
# (c) 2018, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import uuid
import hashlib
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.plugins.action import ActionBase
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
socket_path = None
network_os = self._get_network_os(task_vars).split(".")[-1]
persistent_connection = self._play_context.connection.split(".")[-1]
result = super(ActionModule, self).run(task_vars=task_vars)
if persistent_connection != "network_cli":
# It is supported only with network_cli
result["failed"] = True
result["msg"] = (
"connection type %s is not valid for net_put module,"
" please use fully qualified name of network_cli connection type"
% self._play_context.connection
)
return result
try:
src = self._task.args["src"]
except KeyError as exc:
return {
"failed": True,
"msg": "missing required argument: %s" % exc,
}
src_file_path_name = src
# Get destination file if specified
dest = self._task.args.get("dest")
# Get proto
proto = self._task.args.get("protocol")
if proto is None:
proto = "scp"
# Get mode if set
mode = self._task.args.get("mode")
if mode is None:
mode = "binary"
if mode == "text":
try:
self._handle_template(convert_data=False)
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
# Now src has resolved file write to disk in current directory for scp
src = self._task.args.get("src")
filename = str(uuid.uuid4())
cwd = self._loader.get_basedir()
output_file = os.path.join(cwd, filename)
try:
with open(output_file, "wb") as f:
f.write(to_bytes(src, encoding="utf-8"))
except Exception:
os.remove(output_file)
raise
else:
try:
output_file = self._get_binary_src_file(src)
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
sock_timeout = conn.get_option("persistent_command_timeout")
if dest is None:
dest = src_file_path_name
try:
changed = self._handle_existing_file(
conn, output_file, dest, proto, sock_timeout
)
if changed is False:
result["changed"] = changed
result["destination"] = dest
return result
except Exception as exc:
result["msg"] = (
"Warning: %s idempotency check failed. Check dest" % exc
)
try:
conn.copy_file(
source=output_file,
destination=dest,
proto=proto,
timeout=sock_timeout,
)
except Exception as exc:
if to_text(exc) == "No response from server":
if network_os == "iosxr":
# IOSXR sometimes closes socket prematurely after completion
# of file transfer
result[
"msg"
] = "Warning: iosxr scp server pre close issue. Please check dest"
else:
result["failed"] = True
result["msg"] = "Exception received: %s" % exc
if mode == "text":
# Cleanup tmp file expanded with ansible vars
os.remove(output_file)
result["changed"] = changed
result["destination"] = dest
return result
def _handle_existing_file(self, conn, source, dest, proto, timeout):
"""
Determines whether the source and destination file match.
:return: False if source and dest both exist and have matching sha1 sums, True otherwise.
"""
cwd = self._loader.get_basedir()
filename = str(uuid.uuid4())
tmp_source_file = os.path.join(cwd, filename)
try:
conn.get_file(
source=dest,
destination=tmp_source_file,
proto=proto,
timeout=timeout,
)
except ConnectionError as exc:
error = to_text(exc)
if error.endswith("No such file or directory"):
if os.path.exists(tmp_source_file):
os.remove(tmp_source_file)
return True
try:
with open(source, "r") as f:
new_content = f.read()
with open(tmp_source_file, "r") as f:
old_content = f.read()
except (IOError, OSError):
os.remove(tmp_source_file)
raise
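        # Compare SHA-1 digests of the local source and the fetched remote copy;
        # identical digests mean the destination is already up to date.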
sha1 = hashlib.sha1()
old_content_b = to_bytes(old_content, errors="surrogate_or_strict")
sha1.update(old_content_b)
checksum_old = sha1.digest()
sha1 = hashlib.sha1()
new_content_b = to_bytes(new_content, errors="surrogate_or_strict")
sha1.update(new_content_b)
checksum_new = sha1.digest()
os.remove(tmp_source_file)
if checksum_old == checksum_new:
return False
return True
def _get_binary_src_file(self, src):
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(
working_path, "templates", src
)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError("path specified in src not found")
return source
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _get_network_os(self, task_vars):
if "network_os" in self._task.args and self._task.args["network_os"]:
display.vvvv("Getting network OS from task argument")
network_os = self._task.args["network_os"]
elif self._play_context.network_os:
display.vvvv("Getting network OS from inventory")
network_os = self._play_context.network_os
elif (
"network_os" in task_vars.get("ansible_facts", {})
and task_vars["ansible_facts"]["network_os"]
):
display.vvvv("Getting network OS from fact")
network_os = task_vars["ansible_facts"]["network_os"]
else:
raise AnsibleError(
"ansible_network_os must be specified on this host"
)
return network_os
| 8,073 | Python | .py | 200 | 29.395 | 97 | 0.576094 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,338 | network_cli.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py |
# (c) 2016 Red Hat Inc.
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
author:
- Ansible Networking Team (@ansible-network)
name: network_cli
short_description: Use network_cli to run commands on network appliances
description:
- This connection plugin provides a connection to remote devices over SSH and
  implements a CLI shell. This connection plugin is typically used for sending
  and receiving CLI commands to and from network devices.
version_added: 1.0.0
requirements:
- ansible-pylibssh if using I(ssh_type=libssh)
extends_documentation_fragment:
- ansible.netcommon.connection_persistent
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the SSH connection
to.
default: inventory_hostname
vars:
- name: inventory_hostname
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections when establishing
the SSH connection.
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
network_os:
description:
- Configures the device platform network operating system. This value is used
to load the correct terminal and cliconf plugins to communicate with the remote
device.
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the SSH connection
is first established. If the remote_user is not specified, the connection will
use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device when
first establishing the SSH connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
private_key_file:
description:
- The private SSH key or certificate file used to authenticate to the remote device
when first establishing the SSH connection.
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
become:
type: boolean
description:
- The become option will instruct the CLI session to attempt privilege escalation
on platforms that support it. Normally this means transitioning from user mode
to C(enable) mode in the CLI session. If become is set to True and the remote
device does not support privilege escalation or the privilege has already been
elevated, then this option is silently ignored.
- Can be configured from the CLI via the C(--become) or C(-b) options.
default: false
ini:
- section: privilege_escalation
key: become
env:
- name: ANSIBLE_BECOME
vars:
- name: ansible_become
become_errors:
type: str
description:
- This option determines how privilege escalation failures are handled when
I(become) is enabled.
- When set to C(ignore), the errors are silently ignored.
When set to C(warn), a warning message is displayed.
The default option C(fail), triggers a failure and halts execution.
vars:
- name: ansible_network_become_errors
default: fail
choices: ["ignore", "warn", "fail"]
terminal_errors:
type: str
description:
- This option determines how failures while setting terminal parameters
are handled.
- When set to C(ignore), the errors are silently ignored.
When set to C(warn), a warning message is displayed.
The default option C(fail), triggers a failure and halts execution.
vars:
- name: ansible_network_terminal_errors
default: fail
choices: ["ignore", "warn", "fail"]
version_added: 3.1.0
become_method:
description:
- This option allows the become method to be specified in for handling privilege
escalation. Typically the become_method value is set to C(enable) but could
be defined as other values.
default: sudo
ini:
- section: privilege_escalation
key: become_method
env:
- name: ANSIBLE_BECOME_METHOD
vars:
- name: ansible_become_method
host_key_auto_add:
type: boolean
description:
- By default, Ansible will prompt the user before adding SSH keys to the known
hosts file. Since persistent connections such as network_cli run in background
processes, the user will never be prompted. By enabling this option, unknown
host keys will automatically be added to the known hosts file.
- Be sure to fully understand the security implications of enabling this option
on production systems as it could create a security vulnerability.
default: false
ini:
- section: paramiko_connection
key: host_key_auto_add
env:
- name: ANSIBLE_HOST_KEY_AUTO_ADD
persistent_buffer_read_timeout:
type: float
description:
    - Configures, in seconds, the amount of time to wait for the data to be read from
      the Paramiko channel after the command prompt is matched. This timeout value ensures
      that the matched command prompt is correct and there is no more data left to be
      received from the remote host.
default: 0.1
ini:
- section: persistent_connection
key: buffer_read_timeout
env:
- name: ANSIBLE_PERSISTENT_BUFFER_READ_TIMEOUT
vars:
- name: ansible_buffer_read_timeout
terminal_stdout_re:
type: list
elements: dict
description:
- A single regex pattern or a sequence of patterns along with optional flags to
match the command prompt from the received response chunk. This option accepts
C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern
to match the response and the value of C(flags) is the value accepted by I(flags)
argument of I(re.compile) python method to control the way regex is matched
with the response, for example I('re.I').
vars:
- name: ansible_terminal_stdout_re
terminal_stderr_re:
type: list
elements: dict
description:
- This option provides the regex pattern and optional flags to match the error
string from the received response chunk. This option accepts C(pattern) and
C(flags) keys. The value of C(pattern) is a python regex pattern to match the
response and the value of C(flags) is the value accepted by I(flags) argument
of I(re.compile) python method to control the way regex is matched with the
response, for example I('re.I').
vars:
- name: ansible_terminal_stderr_re
terminal_initial_prompt:
type: list
elements: string
description:
- A single regex pattern or a sequence of patterns to evaluate the expected prompt
at the time of initial login to the remote host.
vars:
- name: ansible_terminal_initial_prompt
terminal_initial_answer:
type: list
elements: string
description:
- The answer to reply with if the C(terminal_initial_prompt) is matched. The value
can be a single answer or a list of answers for multiple terminal_initial_prompt.
      In case the login menu has multiple prompts, the sequence of the prompts and expected
      answers should be in the same order, and the value of I(terminal_initial_prompt_checkall)
should be set to I(True) if all the values in C(terminal_initial_prompt) are
expected to be matched and set to I(False) if any one login prompt is to be
matched.
vars:
- name: ansible_terminal_initial_answer
terminal_initial_prompt_checkall:
type: boolean
description:
    - By default the value is set to I(False); once any one of the prompts mentioned
      in the C(terminal_initial_prompt) option is matched, it won't check for the other prompts.
      When set to I(True) it will check for all the prompts mentioned in the C(terminal_initial_prompt)
      option in the given order, and all the prompts should be received from the remote
      host, otherwise it will result in a timeout.
default: false
vars:
- name: ansible_terminal_initial_prompt_checkall
terminal_inital_prompt_newline:
type: boolean
description:
    - This boolean flag, when set to I(True), sends a newline in the response
      if any of the values in I(terminal_initial_prompt) is matched.
default: true
vars:
- name: ansible_terminal_initial_prompt_newline
network_cli_retries:
description:
    - Number of attempts to connect to the remote host. The delay time between the retries
      increases after every attempt by a power of 2 in seconds, until either the maximum
      attempts are exhausted or either of the C(persistent_command_timeout) or C(persistent_connect_timeout)
      timers is triggered.
default: 3
type: integer
env:
- name: ANSIBLE_NETWORK_CLI_RETRIES
ini:
- section: persistent_connection
key: network_cli_retries
vars:
- name: ansible_network_cli_retries
ssh_type:
description:
    - The python package that will be used by the C(network_cli) connection plugin to create an SSH connection to the remote host.
    - I(libssh) will use the ansible-pylibssh package, which needs to be installed in order to work.
    - I(paramiko) will instead use the paramiko package to manage the SSH connection.
    - I(auto) will use ansible-pylibssh if that package is installed, otherwise it will fall back to paramiko.
default: auto
choices: ["libssh", "paramiko", "auto"]
env:
- name: ANSIBLE_NETWORK_CLI_SSH_TYPE
ini:
- section: persistent_connection
key: ssh_type
vars:
- name: ansible_network_cli_ssh_type
host_key_checking:
description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
type: boolean
default: True
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
ini:
- section: defaults
key: host_key_checking
- section: persistent_connection
key: host_key_checking
vars:
- name: ansible_host_key_checking
- name: ansible_ssh_host_key_checking
single_user_mode:
type: boolean
default: false
version_added: 2.0.0
description:
- This option enables caching of data fetched from the target for re-use.
The cache is invalidated when the target device enters configuration mode.
- Applicable only for platforms where this has been implemented.
env:
- name: ANSIBLE_NETWORK_SINGLE_USER_MODE
vars:
- name: ansible_network_single_user_mode
"""
import getpass
import json
import logging
import os
import re
import signal
import socket
import time
import traceback
from functools import wraps
from io import BytesIO
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import cPickle
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import (
cache_loader,
cliconf_loader,
connection_loader,
terminal_loader,
)
from ansible.utils.display import Display
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible_collections.ansible.netcommon.plugins.plugin_utils.connection_base import (
NetworkConnectionBase,
)
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
HAS_PYLIBSSH = False
display = Display()
def ensure_connect(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if not self._connected:
self._connect()
self.update_cli_prompt_context()
return func(self, *args, **kwargs)
return wrapped
class AnsibleCmdRespRecv(Exception):
pass
class Connection(NetworkConnectionBase):
"""CLI (shell) SSH connections on Paramiko"""
transport = "ansible.netcommon.network_cli"
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._ssh_shell = None
self._matched_prompt = None
self._matched_cmd_prompt = None
self._matched_pattern = None
self._last_response = None
self._history = list()
self._command_response = None
self._last_recv_window = None
self._cache = None
self._terminal = None
self.cliconf = None
# Managing prompt context
self._check_prompt = False
self._task_uuid = to_text(kwargs.get("task_uuid", ""))
self._ssh_type_conn = None
self._ssh_type = None
self._single_user_mode = False
if self._network_os:
self._terminal = terminal_loader.get(self._network_os, self)
if not self._terminal:
raise AnsibleConnectionFailure(
"network os %s is not supported" % self._network_os
)
self.cliconf = cliconf_loader.get(self._network_os, self)
if self.cliconf:
self._sub_plugin = {
"type": "cliconf",
"name": self.cliconf._load_name,
"obj": self.cliconf,
}
self.queue_message(
"vvvv",
"loaded cliconf plugin %s from path %s for network_os %s"
% (
self.cliconf._load_name,
self.cliconf._original_path,
self._network_os,
),
)
else:
self.queue_message(
"vvvv",
"unable to load cliconf for network_os %s"
% self._network_os,
)
else:
raise AnsibleConnectionFailure(
"Unable to automatically determine host network os. Please "
"manually configure ansible_network_os value for this host"
)
self.queue_message("log", "network_os is set to %s" % self._network_os)
@property
def ssh_type(self):
if self._ssh_type is None:
self._ssh_type = self.get_option("ssh_type")
self.queue_message(
"vvvv", "ssh type is set to %s" % self._ssh_type
)
# Support autodetection of supported library
if self._ssh_type == "auto":
self.queue_message("vvvv", "autodetecting ssh_type")
if HAS_PYLIBSSH:
self._ssh_type = "libssh"
else:
self.queue_message(
"warning",
"ansible-pylibssh not installed, falling back to paramiko",
)
self._ssh_type = "paramiko"
self.queue_message(
"vvvv", "ssh type is now set to %s" % self._ssh_type
)
if self._ssh_type not in ["paramiko", "libssh"]:
raise AnsibleConnectionFailure(
"Invalid value '%s' set for ssh_type option."
" Expected value is either 'libssh' or 'paramiko'"
% self._ssh_type
)
return self._ssh_type
@property
def ssh_type_conn(self):
if self._ssh_type_conn is None:
if self.ssh_type == "libssh":
connection_plugin = "ansible.netcommon.libssh"
elif self.ssh_type == "paramiko":
# NOTE: This MUST be paramiko or things will break
connection_plugin = "paramiko"
else:
raise AnsibleConnectionFailure(
"Invalid value '%s' set for ssh_type option."
" Expected value is either 'libssh' or 'paramiko'"
% self._ssh_type
)
self._ssh_type_conn = connection_loader.get(
connection_plugin, self._play_context, "/dev/null"
)
return self._ssh_type_conn
# To maintain backward compatibility
@property
def paramiko_conn(self):
return self.ssh_type_conn
def _get_log_channel(self):
name = "p=%s u=%s | " % (os.getpid(), getpass.getuser())
name += "%s [%s]" % (self.ssh_type, self._play_context.remote_addr)
return name
@ensure_connect
def get_prompt(self):
"""Returns the current prompt from the device"""
return self._matched_prompt
def exec_command(self, cmd, in_data=None, sudoable=True):
# this try..except block is just to handle the transition to supporting
# network_cli as a toplevel connection. Once connection=local is gone,
# this block can be removed as well and all calls passed directly to
# the local connection
if self._ssh_shell:
try:
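                # The incoming 'cmd' is normally a JSON document produced by the
                # cliconf layer, e.g. (illustrative only):
                #   {"command": "show version", "prompt": null, "answer": null}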
cmd = json.loads(to_text(cmd, errors="surrogate_or_strict"))
kwargs = {
"command": to_bytes(
cmd["command"], errors="surrogate_or_strict"
)
}
for key in (
"prompt",
"answer",
"sendonly",
"newline",
"prompt_retry_check",
):
if cmd.get(key) is True or cmd.get(key) is False:
kwargs[key] = cmd[key]
elif cmd.get(key) is not None:
kwargs[key] = to_bytes(
cmd[key], errors="surrogate_or_strict"
)
return self.send(**kwargs)
except ValueError:
cmd = to_bytes(cmd, errors="surrogate_or_strict")
return self.send(command=cmd)
else:
return super(Connection, self).exec_command(cmd, in_data, sudoable)
def get_options(self, hostvars=None):
options = super(Connection, self).get_options(hostvars=hostvars)
options.update(self.ssh_type_conn.get_options(hostvars=hostvars))
return options
def set_options(self, task_keys=None, var_options=None, direct=None):
super(Connection, self).set_options(
task_keys=task_keys, var_options=var_options, direct=direct
)
self.ssh_type_conn.set_options(
task_keys=task_keys, var_options=var_options, direct=direct
)
# Retain old look_for_keys behaviour, but only if not set
if not any(
[
task_keys and ("look_for_keys" in task_keys),
var_options and ("look_for_keys" in var_options),
direct and ("look_for_keys" in direct),
]
):
look_for_keys = not bool(
self.get_option("password")
and not self.get_option("private_key_file")
)
if not look_for_keys:
# This actually can't be overridden yet without changes in ansible-core
# TODO: Uncomment when appropriate
# self.queue_message(
# "warning",
# "Option look_for_keys has been implicitly set to {0} because "
# "it was not set explicitly. This is retained to maintain "
# "backwards compatibility with the old behavior. This behavior "
# "will be removed in some release after 2024-01-01".format(
# look_for_keys
# ),
# )
self.ssh_type_conn.set_option("look_for_keys", look_for_keys)
def update_play_context(self, pc_data):
"""Updates the play context information for the connection"""
pc_data = to_bytes(pc_data)
if PY3:
pc_data = cPickle.loads(pc_data, encoding="bytes")
else:
pc_data = cPickle.loads(pc_data)
play_context = PlayContext()
play_context.deserialize(pc_data)
self.queue_message("vvvv", "updating play_context for connection")
if self._play_context.become ^ play_context.become:
if play_context.become is True:
auth_pass = play_context.become_pass
self._on_become(become_pass=auth_pass)
self.queue_message("vvvv", "authorizing connection")
else:
self._terminal.on_unbecome()
self.queue_message("vvvv", "deauthorizing connection")
self._play_context = play_context
if self._ssh_type_conn is not None:
# TODO: This works, but is not really ideal. We would rather use
# set_options, but then we need more custom handling in that
# method.
self._ssh_type_conn._play_context = play_context
if hasattr(self, "reset_history"):
self.reset_history()
if hasattr(self, "disable_response_logging"):
self.disable_response_logging()
self._single_user_mode = self.get_option("single_user_mode")
def set_check_prompt(self, task_uuid):
self._check_prompt = task_uuid
def update_cli_prompt_context(self):
# set cli prompt context at the start of new task run only
if self._check_prompt and self._task_uuid != self._check_prompt:
self._task_uuid, self._check_prompt = self._check_prompt, False
self.set_cli_prompt_context()
def _connect(self):
"""
Connects to the remote device and starts the terminal
"""
if display.verbosity > 3:
logging.getLogger(self.ssh_type).setLevel(logging.DEBUG)
self.queue_message(
"vvvv", "invoked shell using ssh_type: %s" % self.ssh_type
)
self._single_user_mode = self.get_option("single_user_mode")
if not self.connected:
self.ssh_type_conn._set_log_channel(self._get_log_channel())
self.ssh_type_conn.force_persistence = self.force_persistence
command_timeout = self.get_option("persistent_command_timeout")
max_pause = min(
[
self.get_option("persistent_connect_timeout"),
command_timeout,
]
)
retries = self.get_option("network_cli_retries")
total_pause = 0
for attempt in range(retries + 1):
try:
ssh = self.ssh_type_conn._connect()
break
except AnsibleError:
raise
except Exception as e:
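                    # Exponential backoff between retries: 2, 4, 8, ... seconds,
                    # bounded by the connect/command timeouts checked below.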
pause = 2 ** (attempt + 1)
if attempt == retries or total_pause >= max_pause:
raise AnsibleConnectionFailure(
to_text(e, errors="surrogate_or_strict")
)
else:
msg = (
"network_cli_retry: attempt: %d, caught exception(%s), "
"pausing for %d seconds"
% (
attempt + 1,
to_text(e, errors="surrogate_or_strict"),
pause,
)
)
self.queue_message("vv", msg)
time.sleep(pause)
total_pause += pause
continue
self.queue_message("vvvv", "ssh connection done, setting terminal")
self._connected = True
self._ssh_shell = ssh.ssh.invoke_shell()
if self.ssh_type == "paramiko":
self._ssh_shell.settimeout(command_timeout)
self.queue_message(
"vvvv",
"loaded terminal plugin for network_os %s" % self._network_os,
)
terminal_initial_prompt = (
self.get_option("terminal_initial_prompt")
or self._terminal.terminal_initial_prompt
)
terminal_initial_answer = (
self.get_option("terminal_initial_answer")
or self._terminal.terminal_initial_answer
)
newline = (
self.get_option("terminal_inital_prompt_newline")
or self._terminal.terminal_inital_prompt_newline
)
check_all = (
self.get_option("terminal_initial_prompt_checkall") or False
)
self.receive(
prompts=terminal_initial_prompt,
answer=terminal_initial_answer,
newline=newline,
check_all=check_all,
)
if self._play_context.become:
self.queue_message("vvvv", "firing event: on_become")
auth_pass = self._play_context.become_pass
self._on_become(become_pass=auth_pass)
self.queue_message("vvvv", "firing event: on_open_shell()")
self._on_open_shell()
self.queue_message(
"vvvv", "ssh connection has completed successfully"
)
return self
def _on_become(self, become_pass=None):
"""
Wraps terminal.on_become() to handle
privilege escalation failures based on user preference
"""
on_become_error = self.get_option("become_errors")
try:
self._terminal.on_become(passwd=become_pass)
except AnsibleConnectionFailure:
if on_become_error == "ignore":
pass
elif on_become_error == "warn":
self.queue_message(
"warning", "on_become: privilege escalation failed"
)
else:
raise
def _on_open_shell(self):
"""
Wraps terminal.on_open_shell() to handle
terminal setting failures based on user preference
"""
on_terminal_error = self.get_option("terminal_errors")
try:
self._terminal.on_open_shell()
except AnsibleConnectionFailure:
if on_terminal_error == "ignore":
pass
elif on_terminal_error == "warn":
self.queue_message(
"warning",
"on_open_shell: failed to set terminal parameters",
)
else:
raise
def close(self):
"""
Close the active connection to the device
"""
# only close the connection if its connected.
if self._connected:
self.queue_message("debug", "closing ssh connection to device")
if self._ssh_shell:
self.queue_message("debug", "firing event: on_close_shell()")
self._terminal.on_close_shell()
self._ssh_shell.close()
self._ssh_shell = None
self.queue_message("debug", "cli session is now closed")
self.ssh_type_conn.close()
self._ssh_type_conn = None
self.queue_message(
"debug", "ssh connection has been closed successfully"
)
super(Connection, self).close()
def _read_post_command_prompt_match(self):
time.sleep(self.get_option("persistent_buffer_read_timeout"))
data = self._ssh_shell.read_bulk_response()
return data if data else None
def receive_paramiko(
self,
command=None,
prompts=None,
answer=None,
newline=True,
prompt_retry_check=False,
check_all=False,
strip_prompt=True,
):
recv = BytesIO()
cache_socket_timeout = self.get_option("persistent_command_timeout")
self._ssh_shell.settimeout(cache_socket_timeout)
command_prompt_matched = False
handled = False
errored_response = None
while True:
if command_prompt_matched:
try:
signal.signal(
signal.SIGALRM, self._handle_buffer_read_timeout
)
signal.setitimer(
signal.ITIMER_REAL, self._buffer_read_timeout
)
data = self._ssh_shell.recv(256)
signal.alarm(0)
self._log_messages(
"response-%s: %s" % (self._window_count + 1, data)
)
# if data is still received on channel it indicates the prompt string
# is wrongly matched in between response chunks, continue to read
# remaining response.
command_prompt_matched = False
# restart command_timeout timer
signal.signal(signal.SIGALRM, self._handle_command_timeout)
signal.alarm(self._command_timeout)
except AnsibleCmdRespRecv:
# reset socket timeout to global timeout
return self._command_response
else:
data = self._ssh_shell.recv(256)
self._log_messages(
"response-%s: %s" % (self._window_count + 1, data)
)
# when a channel stream is closed, received data will be empty
if not data:
break
recv.write(data)
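            # Only re-scan the last 256 bytes of the buffer so prompt/error
            # matching stays cheap even for very large responses.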
offset = recv.tell() - 256 if recv.tell() > 256 else 0
recv.seek(offset)
window = self._strip(recv.read())
self._last_recv_window = window
self._window_count += 1
if prompts and not handled:
handled = self._handle_prompt(
window, prompts, answer, newline, False, check_all
)
self._matched_prompt_window = self._window_count
elif (
prompts
and handled
and prompt_retry_check
and self._matched_prompt_window + 1 == self._window_count
):
# check again even when handled, if same prompt repeats in next window
# (like in the case of a wrong enable password, etc) indicates
# value of answer is wrong, report this as error.
if self._handle_prompt(
window,
prompts,
answer,
newline,
prompt_retry_check,
check_all,
):
raise AnsibleConnectionFailure(
"For matched prompt '%s', answer is not valid"
% self._matched_cmd_prompt
)
if self._find_error(window):
# We can't exit here, as we need to drain the buffer in case
# the error isn't fatal, and will be using the buffer again
errored_response = window
if self._find_prompt(window):
if errored_response:
raise AnsibleConnectionFailure(errored_response)
self._last_response = recv.getvalue()
resp = self._strip(self._last_response)
self._command_response = self._sanitize(
resp, command, strip_prompt
)
if self._buffer_read_timeout == 0.0:
# reset socket timeout to global timeout
return self._command_response
else:
command_prompt_matched = True
def receive_libssh(
self,
command=None,
prompts=None,
answer=None,
newline=True,
prompt_retry_check=False,
check_all=False,
strip_prompt=True,
):
self._command_response = resp = b""
command_prompt_matched = False
handled = False
errored_response = None
while True:
if command_prompt_matched:
data = self._read_post_command_prompt_match()
if data:
command_prompt_matched = False
else:
return self._command_response
else:
try:
data = self._ssh_shell.read_bulk_response()
# TODO: Should be ConnectionError when pylibssh drops Python 2 support
except OSError:
# Socket has closed
break
if not data:
continue
self._last_recv_window = self._strip(data)
resp += self._last_recv_window
self._window_count += 1
self._log_messages("response-%s: %s" % (self._window_count, data))
if prompts and not handled:
handled = self._handle_prompt(
resp, prompts, answer, newline, False, check_all
)
self._matched_prompt_window = self._window_count
elif (
prompts
and handled
and prompt_retry_check
and self._matched_prompt_window + 1 == self._window_count
):
# check again even when handled, if same prompt repeats in next window
# (like in the case of a wrong enable password, etc) indicates
# value of answer is wrong, report this as error.
if self._handle_prompt(
resp,
prompts,
answer,
newline,
prompt_retry_check,
check_all,
):
raise AnsibleConnectionFailure(
"For matched prompt '%s', answer is not valid"
% self._matched_cmd_prompt
)
if self._find_error(resp):
# We can't exit here, as we need to drain the buffer in case
# the error isn't fatal, and will be using the buffer again
errored_response = resp
if self._find_prompt(resp):
if errored_response:
raise AnsibleConnectionFailure(errored_response)
self._last_response = data
self._command_response += self._sanitize(
resp, command, strip_prompt
)
command_prompt_matched = True
def receive(
self,
command=None,
prompts=None,
answer=None,
newline=True,
prompt_retry_check=False,
check_all=False,
strip_prompt=True,
):
"""
Handles receiving of output from command
"""
self._matched_prompt = None
self._matched_cmd_prompt = None
self._matched_prompt_window = 0
self._window_count = 0
# set terminal regex values for command prompt and errors in response
self._terminal_stderr_re = self._get_terminal_std_re(
"terminal_stderr_re"
)
self._terminal_stdout_re = self._get_terminal_std_re(
"terminal_stdout_re"
)
self._command_timeout = self.get_option("persistent_command_timeout")
self._validate_timeout_value(
self._command_timeout, "persistent_command_timeout"
)
self._buffer_read_timeout = self.get_option(
"persistent_buffer_read_timeout"
)
self._validate_timeout_value(
self._buffer_read_timeout, "persistent_buffer_read_timeout"
)
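        # A non-zero buffer read timeout makes receive_paramiko() keep reading
        # for a short while after a prompt match to catch any trailing data.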
self._log_messages("command: %s" % command)
if self.ssh_type == "libssh":
response = self.receive_libssh(
command,
prompts,
answer,
newline,
prompt_retry_check,
check_all,
strip_prompt,
)
elif self.ssh_type == "paramiko":
response = self.receive_paramiko(
command,
prompts,
answer,
newline,
prompt_retry_check,
check_all,
strip_prompt,
)
return response
@ensure_connect
def send(
self,
command,
prompt=None,
answer=None,
newline=True,
sendonly=False,
prompt_retry_check=False,
check_all=False,
strip_prompt=True,
):
"""
Sends the command to the device in the opened shell
"""
# try cache first
if (not prompt) and (self._single_user_mode):
out = self.get_cache().lookup(command)
if out:
self.queue_message(
"vvvv", "cache hit for command: %s" % command
)
return out
if check_all:
prompt_len = len(to_list(prompt))
answer_len = len(to_list(answer))
if prompt_len != answer_len:
raise AnsibleConnectionFailure(
"Number of prompts (%s) is not same as that of answers (%s)"
% (prompt_len, answer_len)
)
try:
cmd = b"%s\r" % command
self._history.append(cmd)
self._ssh_shell.sendall(cmd)
self._log_messages("send command: %s" % cmd)
if sendonly:
return
response = self.receive(
command,
prompt,
answer,
newline,
prompt_retry_check,
check_all,
strip_prompt,
)
response = to_text(response, errors="surrogate_then_replace")
if (not prompt) and (self._single_user_mode):
if self._needs_cache_invalidation(command):
# invalidate the existing cache
if self.get_cache().keys():
self.queue_message(
"vvvv", "invalidating existing cache"
)
self.get_cache().invalidate()
else:
# populate cache
self.queue_message(
"vvvv", "populating cache for command: %s" % command
)
self.get_cache().populate(command, response)
return response
except (socket.timeout, AttributeError):
self.queue_message("error", traceback.format_exc())
raise AnsibleConnectionFailure(
"timeout value %s seconds reached while trying to send command: %s"
% (self._ssh_shell.gettimeout(), command.strip())
)
def _handle_buffer_read_timeout(self, signum, frame):
self.queue_message(
"vvvv",
"Response received, triggered 'persistent_buffer_read_timeout' timer of %s seconds"
% self.get_option("persistent_buffer_read_timeout"),
)
raise AnsibleCmdRespRecv()
def _handle_command_timeout(self, signum, frame):
msg = (
"command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide."
% self.get_option("persistent_command_timeout")
)
self.queue_message("log", msg)
raise AnsibleConnectionFailure(msg)
def _strip(self, data):
"""
Removes ANSI codes from device response
"""
for regex in self._terminal.ansi_re:
data = regex.sub(b"", data)
return data
def _handle_prompt(
self,
resp,
prompts,
answer,
newline,
prompt_retry_check=False,
check_all=False,
):
"""
Matches the command prompt and responds
:arg resp: Byte string containing the raw response from the remote
:arg prompts: Sequence of byte strings that we consider prompts for input
:arg answer: Sequence of Byte string to send back to the remote if we find a prompt.
A carriage return is automatically appended to this string.
:param prompt_retry_check: Bool value for trying to detect more prompts
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
        :returns: True if a prompt was found in ``resp``. If check_all is True,
            this returns True only after all the prompts in the prompts list are matched. False otherwise.
"""
single_prompt = False
if not isinstance(prompts, list):
prompts = [prompts]
single_prompt = True
if not isinstance(answer, list):
answer = [answer]
try:
prompts_regex = [re.compile(to_bytes(r), re.I) for r in prompts]
except re.error as exc:
raise ConnectionError(
"Failed to compile one or more terminal prompt regexes: %s.\n"
"Prompts provided: %s" % (to_text(exc), prompts)
)
for index, regex in enumerate(prompts_regex):
match = regex.search(resp)
if match:
self._matched_cmd_prompt = match.group()
self._log_messages(
"matched command prompt: %s" % self._matched_cmd_prompt
)
# if prompt_retry_check is enabled to check if same prompt is
# repeated don't send answer again.
if not prompt_retry_check:
prompt_answer = to_bytes(
answer[index] if len(answer) > index else answer[0]
)
if newline:
prompt_answer += b"\r"
self._ssh_shell.sendall(prompt_answer)
self._log_messages(
"matched command prompt answer: %s" % prompt_answer
)
if check_all and prompts and not single_prompt:
prompts.pop(0)
answer.pop(0)
return False
return True
return False
def _sanitize(self, resp, command=None, strip_prompt=True):
"""
Removes elements from the response before returning to the caller
"""
cleaned = []
for line in resp.splitlines():
if command and line.strip() == command.strip():
continue
for prompt in self._matched_prompt.strip().splitlines():
if prompt.strip() in line and strip_prompt:
break
else:
cleaned.append(line)
return b"\n".join(cleaned).strip()
def _find_error(self, response):
"""Searches the buffered response for a matching error condition"""
for stderr_regex in self._terminal_stderr_re:
if stderr_regex.search(response):
self._log_messages(
"matched error regex (terminal_stderr_re) '%s' from response '%s'"
% (stderr_regex.pattern, response)
)
self._log_messages(
"matched stdout regex (terminal_stdout_re) '%s' from error response '%s'"
% (self._matched_pattern, response)
)
return True
return False
def _find_prompt(self, response):
"""Searches the buffered response for a matching command prompt"""
for stdout_regex in self._terminal_stdout_re:
match = stdout_regex.search(response)
if match:
self._matched_pattern = stdout_regex.pattern
self._matched_prompt = match.group()
self._log_messages(
"matched cli prompt '%s' with regex '%s' from response '%s'"
% (self._matched_prompt, self._matched_pattern, response)
)
return True
return False
def _validate_timeout_value(self, timeout, timer_name):
if timeout < 0:
raise AnsibleConnectionFailure(
"'%s' timer value '%s' is invalid, value should be greater than or equal to zero."
% (timer_name, timeout)
)
def transport_test(self, connect_timeout):
"""This method enables wait_for_connection to work.
As it is used by wait_for_connection, it is called by that module's action plugin,
which is on the controller process, which means that nothing done on this instance
should impact the actual persistent connection... this check is for informational
purposes only and should be properly cleaned up.
"""
# Force a fresh connect if for some reason we have connected before.
self.close()
self._connect()
self.close()
def _get_terminal_std_re(self, option):
terminal_std_option = self.get_option(option)
terminal_std_re = []
if terminal_std_option:
for item in terminal_std_option:
if "pattern" not in item:
raise AnsibleConnectionFailure(
"'pattern' is a required key for option '%s',"
" received option value is %s" % (option, item)
)
pattern = rb"%s" % to_bytes(item["pattern"])
flag = item.get("flags", 0)
if flag:
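                    # Convert a flag given as a string such as "re.I" into the
                    # corresponding re module constant.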
flag = getattr(re, flag.split(".")[1])
terminal_std_re.append(re.compile(pattern, flag))
else:
# To maintain backward compatibility
terminal_std_re = getattr(self._terminal, option)
return terminal_std_re
def copy_file(
self, source=None, destination=None, proto="scp", timeout=30
):
"""Copies file over scp/sftp to remote device
:param source: Source file path
:param destination: Destination file path on remote device
:param proto: Protocol to be used for file transfer,
            supported protocols: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
ssh = self.ssh_type_conn._connect_uncached()
if self.ssh_type == "libssh":
self.ssh_type_conn.put_file(source, destination, proto=proto)
elif self.ssh_type == "paramiko":
if proto == "scp":
if not HAS_SCP:
raise AnsibleError(missing_required_lib("scp"))
with SCPClient(
ssh.get_transport(), socket_timeout=timeout
) as scp:
scp.put(source, destination)
elif proto == "sftp":
with ssh.open_sftp() as sftp:
sftp.put(source, destination)
else:
raise AnsibleError(
"Do not know how to do transfer file over protocol %s"
% proto
)
else:
raise AnsibleError(
"Do not know how to do SCP with ssh_type %s" % self.ssh_type
)
def get_file(self, source=None, destination=None, proto="scp", timeout=30):
"""Fetch file over scp/sftp from remote device
:param source: Source file path
:param destination: Destination file path
:param proto: Protocol to be used for file transfer,
            supported protocols: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
ssh = self.ssh_type_conn._connect_uncached()
if self.ssh_type == "libssh":
self.ssh_type_conn.fetch_file(source, destination, proto=proto)
elif self.ssh_type == "paramiko":
if proto == "scp":
if not HAS_SCP:
raise AnsibleError(missing_required_lib("scp"))
try:
with SCPClient(
ssh.get_transport(), socket_timeout=timeout
) as scp:
scp.get(source, destination)
except EOFError:
# This appears to be benign.
pass
elif proto == "sftp":
with ssh.open_sftp() as sftp:
sftp.get(source, destination)
else:
raise AnsibleError(
"Do not know how to do transfer file over protocol %s"
% proto
)
else:
raise AnsibleError(
"Do not know how to do SCP with ssh_type %s" % self.ssh_type
)
def get_cache(self):
if not self._cache:
# TO-DO: support jsonfile or other modes of caching with
# a configurable option
self._cache = cache_loader.get("ansible.netcommon.memory")
return self._cache
def _is_in_config_mode(self):
"""
Check if the target device is in config mode by comparing
the current prompt with the platform's `terminal_config_prompt`.
Returns False if `terminal_config_prompt` is not defined.
:returns: A boolean indicating if the device is in config mode or not.
"""
cfg_mode = False
cur_prompt = to_text(
self.get_prompt(), errors="surrogate_then_replace"
).strip()
cfg_prompt = getattr(self._terminal, "terminal_config_prompt", None)
if cfg_prompt and cfg_prompt.match(cur_prompt):
cfg_mode = True
return cfg_mode
def _needs_cache_invalidation(self, command):
"""
This method determines if it is necessary to invalidate
the existing cache based on whether the device has entered
configuration mode or if the last command sent to the device
is potentially capable of making configuration changes.
:param command: The last command sent to the target device.
:returns: A boolean indicating if cache invalidation is required or not.
"""
invalidate = False
cfg_cmds = []
try:
# AnsiblePlugin base class in Ansible 2.9 does not have has_option() method.
# TO-DO: use has_option() when we drop 2.9 support.
cfg_cmds = self.cliconf.get_option("config_commands")
except AttributeError:
cfg_cmds = []
if (self._is_in_config_mode()) or (to_text(command) in cfg_cmds):
invalidate = True
return invalidate
| 51,722 | Python | .py | 1,264 | 29.163766 | 147 | 0.573884 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,339 | persistent.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py |
# 2017 Red Hat Inc.
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """author: Ansible Core Team
connection: persistent
short_description: Use a persistent unix socket for connection
description:
- This is a helper plugin to allow making other connections persistent.
options:
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to return from
the remote device. If this timer is exceeded before the command returns, the
connection plugin will raise an exception and close
default: 10
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
"""
from ansible.executor.task_executor import start_connection
from ansible.plugins.connection import ConnectionBase
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.connection import Connection as SocketConnection
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
""" Local based connections """
transport = "ansible.netcommon.persistent"
has_pipelining = False
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._task_uuid = to_text(kwargs.get("task_uuid", ""))
def _connect(self):
self._connected = True
return self
def exec_command(self, cmd, in_data=None, sudoable=True):
display.vvvv(
"exec_command(), socket_path=%s" % self.socket_path,
host=self._play_context.remote_addr,
)
connection = SocketConnection(self.socket_path)
out = connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
return 0, out, ""
def put_file(self, in_path, out_path):
pass
def fetch_file(self, in_path, out_path):
pass
def close(self):
self._connected = False
def run(self):
"""Returns the path of the persistent connection socket.
Attempts to ensure (within playcontext.timeout seconds) that the
socket path exists. If the path exists (or the timeout has expired),
returns the socket path.
"""
display.vvvv(
"starting connection from persistent connection plugin",
host=self._play_context.remote_addr,
)
variables = {
"ansible_command_timeout": self.get_option(
"persistent_command_timeout"
)
}
socket_path = start_connection(
self._play_context, variables, self._task_uuid
)
display.vvvv(
"local domain socket path is %s" % socket_path,
host=self._play_context.remote_addr,
)
setattr(self, "_socket_path", socket_path)
return socket_path
| 3,107 | Python | .py | 81 | 31.493827 | 92 | 0.671538 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,340 | cli_config.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: cli_config
author: Trishna Guha (@trishnaguha)
notes:
- The commands will be returned only for platforms that do not support onbox diff.
  The C(--diff) option with the playbook will return the difference in configuration
  for devices that have support for onbox diff.
short_description: Push text based configuration to network devices over network_cli
description:
- This module provides a platform-agnostic way of pushing text-based configuration to
  network devices over the network_cli connection plugin.
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
config:
description:
- The config to be pushed to the network device. This argument is mutually exclusive
with C(rollback) and either one of the option should be given as input. The
config should have indentation that the device uses.
type: str
commit:
description:
- The C(commit) argument instructs the module to push the configuration to the
device. This is mapped to module check mode.
type: bool
replace:
description:
    - If the C(replace) argument is set to C(yes), it will replace the entire running-config
      of the device with the C(config) argument value. For devices that support replacing
      the running configuration from a file on the device, like NXOS/JUNOS, the C(replace) argument
      takes the path to the file on the device that will be used for replacing the entire
      running-config. The value of the C(config) option should be I(None) for such devices.
      Nexus 9K devices only support replace. Use I(net_put) or I(nxos_file_copy) in
      case of the NXOS module to copy the flat file to the remote device and then set
      the full path to this argument.
type: str
backup:
description:
- This argument will cause the module to create a full backup of the current running
config from the remote device before any changes are made. If the C(backup_options)
value is not given, the backup file is written to the C(backup) folder in the
playbook root directory or role root directory, if playbook is part of an ansible
role. If the directory does not exist, it is created.
type: bool
default: 'no'
rollback:
description:
- The C(rollback) argument instructs the module to rollback the current configuration
to the identifier specified in the argument. If the specified rollback identifier
does not exist on the remote device, the module will fail. To rollback to the
most recent commit, set the C(rollback) argument to 0. This option is mutually
exclusive with C(config).
commit_comment:
description:
- The C(commit_comment) argument specifies a text string to be used when committing
the configuration. If the C(commit) argument is set to False, this argument
is silently ignored. This argument is only valid for the platforms that support
commit operation with comment.
type: str
defaults:
description:
- The I(defaults) argument will influence how the running-config is collected
from the device. When the value is set to true, the command used to collect
      the running-config is appended with the all keyword. When the value is set to
false, the command is issued without the all keyword.
default: 'no'
type: bool
multiline_delimiter:
description:
- This argument is used when pushing a multiline configuration element to the
device. It specifies the character to use as the delimiting character. This
only applies to the configuration action.
type: str
diff_replace:
description:
- Instructs the module on the way to perform the configuration on the device.
If the C(diff_replace) argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the argument is set to I(block)
then the entire command block is pushed to the device in configuration mode
if any line is not correct. Note that this parameter will be ignored if the
platform has onbox diff support.
choices:
- line
- block
- config
diff_match:
description:
- Instructs the module on the way to perform the matching of the set of commands
against the current device config. If C(diff_match) is set to I(line), commands
are matched line by line. If C(diff_match) is set to I(strict), command lines
are matched with respect to position. If C(diff_match) is set to I(exact), command
lines must be an equal match. Finally, if C(diff_match) is set to I(none), the
module will not attempt to compare the source configuration with the running
configuration on the remote device. Note that this parameter will be ignored
if the platform has onbox diff support.
choices:
- line
- strict
- exact
- none
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be ignored during
the diff. This is used for lines in the configuration that are automatically
updated by the system. This argument takes a list of regular expressions or
exact line matches. Note that this parameter will be ignored if the platform
has onbox diff support.
backup_options:
description:
- This is a dict object containing configurable options related to backup file
path. The value of this option is read only when C(backup) is set to I(yes),
if C(backup) is set to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and
date in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
      - This option provides the path ending with the directory name in which the backup
        configuration file will be stored. If the directory does not exist it will
        be created first, and the filename is either the value of C(filename) or the
        default filename as described in the C(filename) option's description. If the
        path value is not given, a I(backup) directory will be created
        in the current working directory and the backup configuration will be copied
        to C(filename) within the I(backup) directory.
type: path
type: dict
"""
EXAMPLES = """
- name: configure device with config
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
- name: multiline config
cli_config:
config: |
hostname foo
feature nxapi
- name: configure device with config with defaults enabled
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
defaults: yes
- name: Use diff_match
cli_config:
config: "{{ lookup('file', 'interface_config') }}"
diff_match: none
- name: nxos replace config
cli_config:
replace: 'bootflash:nxoscfg'
- name: junos replace config
cli_config:
replace: '/var/home/ansible/junos01.cfg'
- name: commit with comment
cli_config:
config: set system host-name foo
commit_comment: this is a test
- name: configurable backup path
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['interface Loopback999', 'no shutdown']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/hostname_config.2016-07-16@22:28:34
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.common.text.converters import to_text
def validate_args(module, device_operations):
"""validate param if it is supported on the platform
"""
feature_list = [
"replace",
"rollback",
"commit_comment",
"defaults",
"multiline_delimiter",
"diff_replace",
"diff_match",
"diff_ignore_lines",
]
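    # Each feature is validated against the matching "supports_<feature>" key
    # advertised by the platform's cliconf device_operations.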
for feature in feature_list:
if module.params[feature]:
supports_feature = device_operations.get("supports_%s" % feature)
if supports_feature is None:
module.fail_json(
"This platform does not specify whether %s is supported or not. "
"Please report an issue against this platform's cliconf plugin."
% feature
)
elif not supports_feature:
module.fail_json(
msg="Option %s is not supported on this platform" % feature
)
def run(
module, device_operations, connection, candidate, running, rollback_id
):
result = {}
resp = {}
config_diff = []
banner_diff = {}
replace = module.params["replace"]
commit_comment = module.params["commit_comment"]
multiline_delimiter = module.params["multiline_delimiter"]
diff_replace = module.params["diff_replace"]
diff_match = module.params["diff_match"]
diff_ignore_lines = module.params["diff_ignore_lines"]
commit = not module.check_mode
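    # 'replace' may be a boolean-like string or a path to a config file on the
    # device; normalize the boolean-like values first.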
if replace in ("yes", "true", "True"):
replace = True
elif replace in ("no", "false", "False"):
replace = False
if (
replace is not None
and replace not in [True, False]
and candidate is not None
):
module.fail_json(
msg="Replace value '%s' is a configuration file path already"
" present on the device. Hence 'replace' and 'config' options"
" are mutually exclusive" % replace
)
if rollback_id is not None:
resp = connection.rollback(rollback_id, commit)
if "diff" in resp:
result["changed"] = True
elif device_operations.get("supports_onbox_diff"):
if diff_replace:
module.warn(
"diff_replace is ignored as the device supports onbox diff"
)
if diff_match:
module.warn(
"diff_mattch is ignored as the device supports onbox diff"
)
if diff_ignore_lines:
module.warn(
"diff_ignore_lines is ignored as the device supports onbox diff"
)
if candidate and not isinstance(candidate, list):
candidate = candidate.strip("\n").splitlines()
kwargs = {
"candidate": candidate,
"commit": commit,
"replace": replace,
"comment": commit_comment,
}
resp = connection.edit_config(**kwargs)
if "diff" in resp:
result["changed"] = True
elif device_operations.get("supports_generate_diff"):
kwargs = {"candidate": candidate, "running": running}
if diff_match:
kwargs.update({"diff_match": diff_match})
if diff_replace:
kwargs.update({"diff_replace": diff_replace})
if diff_ignore_lines:
kwargs.update({"diff_ignore_lines": diff_ignore_lines})
diff_response = connection.get_diff(**kwargs)
config_diff = diff_response.get("config_diff")
banner_diff = diff_response.get("banner_diff")
if config_diff:
if isinstance(config_diff, list):
candidate = config_diff
else:
candidate = config_diff.splitlines()
kwargs = {
"candidate": candidate,
"commit": commit,
"replace": replace,
"comment": commit_comment,
}
if commit:
connection.edit_config(**kwargs)
result["changed"] = True
result["commands"] = config_diff.split("\n")
if banner_diff:
candidate = json.dumps(banner_diff)
kwargs = {"candidate": candidate, "commit": commit}
if multiline_delimiter:
kwargs.update({"multiline_delimiter": multiline_delimiter})
if commit:
connection.edit_banner(**kwargs)
result["changed"] = True
if module._diff:
if "diff" in resp:
result["diff"] = {"prepared": resp["diff"]}
else:
diff = ""
if config_diff:
if isinstance(config_diff, list):
diff += "\n".join(config_diff)
else:
diff += config_diff
if banner_diff:
diff += json.dumps(banner_diff)
result["diff"] = {"prepared": diff}
return result
def main():
"""main entry point for execution
"""
backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
argument_spec = dict(
backup=dict(default=False, type="bool"),
backup_options=dict(type="dict", options=backup_spec),
config=dict(type="str"),
commit=dict(type="bool"),
replace=dict(type="str"),
rollback=dict(type="int"),
commit_comment=dict(type="str"),
defaults=dict(default=False, type="bool"),
multiline_delimiter=dict(type="str"),
diff_replace=dict(choices=["line", "block", "config"]),
diff_match=dict(choices=["line", "strict", "exact", "none"]),
diff_ignore_lines=dict(type="list"),
)
mutually_exclusive = [("config", "rollback")]
required_one_of = [["backup", "config", "rollback"]]
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_one_of=required_one_of,
supports_check_mode=True,
)
result = {"changed": False}
connection = Connection(module._socket_path)
capabilities = module.from_json(connection.get_capabilities())
if capabilities:
device_operations = capabilities.get("device_operations", dict())
validate_args(module, device_operations)
else:
device_operations = dict()
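    # When 'defaults' is requested, use the platform's get_default_flag RPC if
    # available, otherwise fall back to the literal "all" flag.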
if module.params["defaults"]:
if "get_default_flag" in capabilities.get("rpc"):
flags = connection.get_default_flag()
else:
flags = "all"
else:
flags = []
candidate = module.params["config"]
candidate = (
to_text(candidate, errors="surrogate_then_replace")
if candidate
else None
)
running = connection.get_config(flags=flags)
rollback_id = module.params["rollback"]
if module.params["backup"]:
result["__backup__"] = running
if candidate or rollback_id or module.params["replace"]:
try:
result.update(
run(
module,
device_operations,
connection,
candidate,
running,
rollback_id,
)
)
except Exception as exc:
module.fail_json(msg=to_text(exc))
module.exit_json(**result)
if __name__ == "__main__":
main()
| 15,618 | Python | .py | 388 | 32.541237 | 92 | 0.65195 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 13,341 | config.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations
import re
import hashlib
from ansible.module_utils.six.moves import zip
from ansible.module_utils.common.text.converters import to_bytes, to_native
DEFAULT_COMMENT_TOKENS = ["#", "!", "/*", "*/", "echo"]
DEFAULT_IGNORE_LINES_RE = set(
[
re.compile(r"Using \d+ out of \d+ bytes"),
re.compile(r"Building configuration"),
re.compile(r"Current configuration : \d+ bytes"),
]
)
try:
Pattern = re._pattern_type
except AttributeError:
Pattern = re.Pattern
class ConfigLine(object):
def __init__(self, raw):
self.text = str(raw).strip()
self.raw = raw
self._children = list()
self._parents = list()
def __str__(self):
return self.raw
def __eq__(self, other):
return self.line == other.line
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, key):
for item in self._children:
if item.text == key:
return item
raise KeyError(key)
@property
def line(self):
line = self.parents
line.append(self.text)
return " ".join(line)
@property
def children(self):
return _obj_to_text(self._children)
@property
def child_objs(self):
return self._children
@property
def parents(self):
return _obj_to_text(self._parents)
@property
def path(self):
config = _obj_to_raw(self._parents)
config.append(self.raw)
return "\n".join(config)
@property
def has_children(self):
return len(self._children) > 0
@property
def has_parents(self):
return len(self._parents) > 0
def add_child(self, obj):
if not isinstance(obj, ConfigLine):
raise AssertionError("child must be of type `ConfigLine`")
self._children.append(obj)
def ignore_line(text, tokens=None):
for item in tokens or DEFAULT_COMMENT_TOKENS:
if text.startswith(item):
return True
for regex in DEFAULT_IGNORE_LINES_RE:
if regex.match(text):
return True
def _obj_to_text(x):
return [o.text for o in x]
def _obj_to_raw(x):
return [o.raw for o in x]
def _obj_to_block(objects, visited=None):
items = list()
for o in objects:
if o not in items:
items.append(o)
for child in o._children:
if child not in items:
items.append(child)
return _obj_to_raw(items)
def dumps(objects, output="block", comments=False):
if output == "block":
items = _obj_to_block(objects)
elif output == "commands":
items = _obj_to_text(objects)
elif output == "raw":
items = _obj_to_raw(objects)
else:
raise TypeError("unknown value supplied for keyword output")
if output == "block":
if comments:
for index, item in enumerate(items):
nextitem = index + 1
if (
nextitem < len(items)
and not item.startswith(" ")
and items[nextitem].startswith(" ")
):
item = "!\n%s" % item
items[index] = item
items.append("!")
items.append("end")
return "\n".join(items)
class NetworkConfig(object):
def __init__(self, indent=1, contents=None, ignore_lines=None):
self._indent = indent
self._items = list()
self._config_text = None
if ignore_lines:
for item in ignore_lines:
if not isinstance(item, Pattern):
item = re.compile(item)
DEFAULT_IGNORE_LINES_RE.add(item)
if contents:
self.load(contents)
@property
def items(self):
return self._items
@property
def config_text(self):
return self._config_text
@property
def sha1(self):
sha1 = hashlib.sha1()
sha1.update(to_bytes(str(self), errors="surrogate_or_strict"))
return sha1.digest()
def __getitem__(self, key):
for line in self:
if line.text == key:
return line
raise KeyError(key)
def __iter__(self):
return iter(self._items)
def __str__(self):
return "\n".join([c.raw for c in self.items])
def __len__(self):
return len(self._items)
def load(self, s):
self._config_text = s
self._items = self.parse(s)
def loadfp(self, fp):
with open(fp) as f:
return self.load(f.read())
def parse(self, lines, comment_tokens=None):
toplevel = re.compile(r"\S")
childline = re.compile(r"^\s*(.+)$")
entry_reg = re.compile(r"([{};])")
ancestors = list()
config = list()
indents = [0]
for linenum, line in enumerate(
to_native(lines, errors="surrogate_or_strict").split("\n")
):
text = entry_reg.sub("", line).strip()
cfg = ConfigLine(line)
if not text or ignore_line(text, comment_tokens):
continue
# handle top level commands
if toplevel.match(line):
ancestors = [cfg]
indents = [0]
# handle sub level commands
else:
match = childline.match(line)
line_indent = match.start(1)
if line_indent < indents[-1]:
while indents[-1] > line_indent:
indents.pop()
if line_indent > indents[-1]:
indents.append(line_indent)
curlevel = len(indents) - 1
parent_level = curlevel - 1
cfg._parents = ancestors[:curlevel]
if curlevel > len(ancestors):
config.append(cfg)
continue
for i in range(curlevel, len(ancestors)):
ancestors.pop()
ancestors.append(cfg)
ancestors[parent_level].add_child(cfg)
config.append(cfg)
return config
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
if item.parents == path[:-1]:
return item
def get_block(self, path):
if not isinstance(path, list):
raise AssertionError("path argument must be a list object")
obj = self.get_object(path)
if not obj:
raise ValueError("path does not exist in config")
return self._expand_block(obj)
def get_block_config(self, path):
block = self.get_block(path)
return dumps(block, "block")
def _expand_block(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj._children:
if child in S:
continue
self._expand_block(child, S)
return S
def _diff_line(self, other):
updates = list()
for item in self.items:
if item not in other:
updates.append(item)
return updates
def _diff_strict(self, other):
updates = list()
# the block extracted from other carries only its immediate
# (last) parent; when there are multiple ancestor levels we
# need to prepend the missing parents.
if other and isinstance(other, list) and len(other) > 0:
start_other = other[0]
if start_other.parents:
for parent in start_other.parents:
other.insert(0, ConfigLine(parent))
for index, line in enumerate(self.items):
try:
if str(line).strip() != str(other[index]).strip():
updates.append(line)
except (AttributeError, IndexError):
updates.append(line)
return updates
def _diff_exact(self, other):
updates = list()
if len(other) != len(self.items):
updates.extend(self.items)
else:
for ours, theirs in zip(self.items, other):
if ours != theirs:
updates.extend(self.items)
break
return updates
def difference(self, other, match="line", path=None, replace=None):
"""Perform a config diff against the another network config
:param other: instance of NetworkConfig to diff against
:param match: type of diff to perform. valid values are 'line',
'strict', 'exact'
:param path: context in the network config to filter the diff
:param replace: the method used to generate the replacement lines.
valid values are 'block', 'line'
:returns: a list of ConfigLine objects that differ
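Illustrative example (hypothetical interface config; candidate and running differ in one line)::
    running = NetworkConfig(indent=1, contents="interface Gi0/1\n description old\n")
    candidate = NetworkConfig(indent=1, contents="interface Gi0/1\n description new\n")
    updates = candidate.difference(running, match='line')
    dumps(updates, 'commands')
    # -> 'interface Gi0/1\ndescription new'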
"""
if path and match != "line":
try:
other = other.get_block(path)
except ValueError:
other = list()
else:
other = other.items
# generate a list of ConfigLines that aren't in other
meth = getattr(self, "_diff_%s" % match)
updates = meth(other)
if replace == "block":
parents = list()
for item in updates:
if not item.has_parents:
parents.append(item)
else:
for p in item._parents:
if p not in parents:
parents.append(p)
updates = list()
for item in parents:
updates.extend(self._expand_block(item))
visited = set()
expanded = list()
for item in updates:
for p in item._parents:
if p.line not in visited:
visited.add(p.line)
expanded.append(p)
expanded.append(item)
visited.add(item.line)
return expanded
def add(self, lines, parents=None):
ancestors = list()
offset = 0
obj = None
# global config command
if not parents:
for line in lines:
# handle ignore lines
if ignore_line(line):
continue
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_block(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self._indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj._parents = list(ancestors)
ancestors[-1]._children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in lines:
# handle ignore lines
if ignore_line(line):
continue
# check if child already exists
for child in ancestors[-1]._children:
if child.text == line:
break
else:
offset = len(parents) * self._indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item._parents = ancestors
ancestors[-1]._children.append(item)
self.items.append(item)
class CustomNetworkConfig(NetworkConfig):
def items_text(self):
return [item.text for item in self.items]
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.child_objs:
if child in S:
continue
self.expand_section(child, S)
return S
def to_block(self, section):
return "\n".join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError("path does not exist in config")
return self.expand_section(obj)
| 14,454 | Python | .py | 384 | 26.882813 | 92 | 0.563059 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,342 | network.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
import traceback
import json
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import Connection, ConnectionError
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import (
NetconfConnection,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
Cli,
)
from ansible.module_utils.six import iteritems
NET_TRANSPORT_ARGS = dict(
host=dict(required=True),
port=dict(type="int"),
username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
password=dict(
no_log=True, fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"])
),
ssh_keyfile=dict(
fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path"
),
authorize=dict(
default=False,
fallback=(env_fallback, ["ANSIBLE_NET_AUTHORIZE"]),
type="bool",
),
auth_pass=dict(
no_log=True, fallback=(env_fallback, ["ANSIBLE_NET_AUTH_PASS"])
),
provider=dict(type="dict", no_log=True),
transport=dict(choices=list()),
timeout=dict(default=10, type="int"),
)
NET_CONNECTION_ARGS = dict()
NET_CONNECTIONS = dict()
def _transitional_argument_spec():
argument_spec = {}
for key, value in iteritems(NET_TRANSPORT_ARGS):
value["required"] = False
argument_spec[key] = value
return argument_spec
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class ModuleStub(object):
def __init__(self, argument_spec, fail_json):
self.params = dict()
for key, value in argument_spec.items():
self.params[key] = value.get("default")
self.fail_json = fail_json
class NetworkError(Exception):
def __init__(self, msg, **kwargs):
super(NetworkError, self).__init__(msg)
self.kwargs = kwargs
class Config(object):
def __init__(self, connection):
self.connection = connection
def __call__(self, commands, **kwargs):
lines = to_list(commands)
return self.connection.configure(lines, **kwargs)
def load_config(self, commands, **kwargs):
commands = to_list(commands)
return self.connection.load_config(commands, **kwargs)
def get_config(self, **kwargs):
return self.connection.get_config(**kwargs)
def save_config(self):
return self.connection.save_config()
class NetworkModule(AnsibleModule):
def __init__(self, *args, **kwargs):
connect_on_load = kwargs.pop("connect_on_load", True)
argument_spec = NET_TRANSPORT_ARGS.copy()
argument_spec["transport"]["choices"] = NET_CONNECTIONS.keys()
argument_spec.update(NET_CONNECTION_ARGS.copy())
if kwargs.get("argument_spec"):
argument_spec.update(kwargs["argument_spec"])
kwargs["argument_spec"] = argument_spec
super(NetworkModule, self).__init__(*args, **kwargs)
self.connection = None
self._cli = None
self._config = None
try:
transport = self.params["transport"] or "__default__"
cls = NET_CONNECTIONS[transport]
self.connection = cls()
except KeyError:
self.fail_json(
msg="Unknown transport or no default transport specified"
)
except (TypeError, NetworkError) as exc:
self.fail_json(
msg=to_native(exc), exception=traceback.format_exc()
)
if connect_on_load:
self.connect()
@property
def cli(self):
if not self.connected:
self.connect()
if self._cli:
return self._cli
self._cli = Cli(self.connection)
return self._cli
@property
def config(self):
if not self.connected:
self.connect()
if self._config:
return self._config
self._config = Config(self.connection)
return self._config
@property
def connected(self):
return self.connection._connected
def _load_params(self):
super(NetworkModule, self)._load_params()
provider = self.params.get("provider") or dict()
for key, value in provider.items():
for args in [NET_TRANSPORT_ARGS, NET_CONNECTION_ARGS]:
if key in args:
if self.params.get(key) is None and value is not None:
self.params[key] = value
def connect(self):
try:
if not self.connected:
self.connection.connect(self.params)
if self.params["authorize"]:
self.connection.authorize(self.params)
self.log(
"connected to %s:%s using %s"
% (
self.params["host"],
self.params["port"],
self.params["transport"],
)
)
except NetworkError as exc:
self.fail_json(
msg=to_native(exc), exception=traceback.format_exc()
)
def disconnect(self):
try:
if self.connected:
self.connection.disconnect()
self.log("disconnected from %s" % self.params["host"])
except NetworkError as exc:
self.fail_json(
msg=to_native(exc), exception=traceback.format_exc()
)
def register_transport(transport, default=False):
def register(cls):
NET_CONNECTIONS[transport] = cls
if default:
NET_CONNECTIONS["__default__"] = cls
return cls
return register
def add_argument(key, value):
NET_CONNECTION_ARGS[key] = value
def get_resource_connection(module):
if hasattr(module, "_connection"):
return module._connection
capabilities = get_capabilities(module)
network_api = capabilities.get("network_api")
if network_api in ("cliconf", "nxapi", "eapi", "exosapi"):
module._connection = Connection(module._socket_path)
elif network_api == "netconf":
module._connection = NetconfConnection(module._socket_path)
elif network_api == "local":
# This isn't supported, but we shouldn't fail here.
# Set the connection to a fake connection so it fails sensibly.
module._connection = LocalResourceConnection(module)
else:
module.fail_json(
msg="Invalid connection type {0!s}".format(network_api)
)
return module._connection
def get_capabilities(module):
if hasattr(module, "capabilities"):
return module._capabilities
try:
capabilities = Connection(module._socket_path).get_capabilities()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
except AssertionError:
# No socket_path, connection most likely local.
return dict(network_api="local")
module._capabilities = json.loads(capabilities)
return module._capabilities
class LocalResourceConnection:
def __init__(self, module):
self.module = module
def get(self, *args, **kwargs):
self.module.fail_json(
msg="Network resource modules not supported over local connection."
)
| 9,244 | Python | .py | 227 | 33.057269 | 95 | 0.656222 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,343 | parsing.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
import re
import shlex
import time
from ansible.module_utils.parsing.convert_bool import (
BOOLEANS_TRUE,
BOOLEANS_FALSE,
)
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils.six.moves import zip
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class FailedConditionsError(Exception):
def __init__(self, msg, failed_conditions):
super(FailedConditionsError, self).__init__(msg)
self.failed_conditions = failed_conditions
class FailedConditionalError(Exception):
def __init__(self, msg, failed_conditional):
super(FailedConditionalError, self).__init__(msg)
self.failed_conditional = failed_conditional
class AddCommandError(Exception):
def __init__(self, msg, command):
super(AddCommandError, self).__init__(msg)
self.command = command
class AddConditionError(Exception):
def __init__(self, msg, condition):
super(AddConditionError, self).__init__(msg)
self.condition = condition
class Cli(object):
def __init__(self, connection):
self.connection = connection
self.default_output = connection.default_output or "text"
self._commands = list()
@property
def commands(self):
return [str(c) for c in self._commands]
def __call__(self, commands, output=None):
objects = list()
for cmd in to_list(commands):
objects.append(self.to_command(cmd, output))
return self.connection.run_commands(objects)
def to_command(
self, command, output=None, prompt=None, response=None, **kwargs
):
output = output or self.default_output
if isinstance(command, Command):
return command
if isinstance(prompt, string_types):
prompt = re.compile(re.escape(prompt))
return Command(
command, output, prompt=prompt, response=response, **kwargs
)
def add_commands(self, commands, output=None, **kwargs):
for cmd in commands:
self._commands.append(self.to_command(cmd, output, **kwargs))
def run_commands(self):
responses = self.connection.run_commands(self._commands)
for resp, cmd in zip(responses, self._commands):
cmd.response = resp
# wipe out the commands list to avoid issues if additional
# commands are executed later
self._commands = list()
return responses
class Command(object):
def __init__(
self, command, output=None, prompt=None, response=None, **kwargs
):
self.command = command
self.output = output
self.command_string = command
self.prompt = prompt
self.response = response
self.args = kwargs
def __str__(self):
return self.command_string
class CommandRunner(object):
def __init__(self, module):
self.module = module
self.items = list()
self.conditionals = set()
self.commands = list()
self.retries = 10
self.interval = 1
self.match = "all"
self._default_output = module.connection.default_output
def add_command(
self, command, output=None, prompt=None, response=None, **kwargs
):
if command in [str(c) for c in self.commands]:
raise AddCommandError(
"duplicated command detected", command=command
)
cmd = self.module.cli.to_command(
command, output=output, prompt=prompt, response=response, **kwargs
)
self.commands.append(cmd)
def get_command(self, command, output=None):
for cmd in self.commands:
if cmd.command == command:
return cmd.response
raise ValueError("command '%s' not found" % command)
def get_responses(self):
return [cmd.response for cmd in self.commands]
def add_conditional(self, condition):
try:
self.conditionals.add(Conditional(condition))
except AttributeError as exc:
raise AddConditionError(msg=str(exc), condition=condition)
def run(self):
while self.retries > 0:
self.module.cli.add_commands(self.commands)
responses = self.module.cli.run_commands()
for item in list(self.conditionals):
if item(responses):
if self.match == "any":
return item
self.conditionals.remove(item)
if not self.conditionals:
break
time.sleep(self.interval)
self.retries -= 1
else:
failed_conditions = [item.raw for item in self.conditionals]
errmsg = (
"One or more conditional statements have not been satisfied"
)
raise FailedConditionsError(errmsg, failed_conditions)
class Conditional(object):
"""Used in command modules to evaluate waitfor conditions
"""
OPERATORS = {
"eq": ["eq", "=="],
"neq": ["neq", "ne", "!="],
"gt": ["gt", ">"],
"ge": ["ge", ">="],
"lt": ["lt", "<"],
"le": ["le", "<="],
"contains": ["contains"],
"matches": ["matches"],
}
def __init__(self, conditional, encoding=None):
self.raw = conditional
self.negate = False
try:
components = shlex.split(conditional)
key, val = components[0], components[-1]
op_components = components[1:-1]
if "not" in op_components:
self.negate = True
op_components.pop(op_components.index("not"))
op = op_components[0]
except ValueError:
raise ValueError("failed to parse conditional")
self.key = key
self.func = self._func(op)
self.value = self._cast_value(val)
def __call__(self, data):
value = self.get_value(dict(result=data))
if not self.negate:
return self.func(value)
else:
return not self.func(value)
def _cast_value(self, value):
if value in BOOLEANS_TRUE:
return True
elif value in BOOLEANS_FALSE:
return False
elif re.match(r"^\d+\.d+$", value):
return float(value)
elif re.match(r"^\d+$", value):
return int(value)
else:
return text_type(value)
def _func(self, oper):
for func, operators in self.OPERATORS.items():
if oper in operators:
return getattr(self, func)
raise AttributeError("unknown operator: %s" % oper)
def get_value(self, result):
try:
return self.get_json(result)
except (IndexError, TypeError, AttributeError):
msg = "unable to apply conditional to result"
raise FailedConditionalError(msg, self.raw)
def get_json(self, result):
string = re.sub(r"\[[\'|\"]", ".", self.key)
string = re.sub(r"[\'|\"]\]", ".", string)
parts = re.split(r"\.(?=[^\]]*(?:\[|$))", string)
for part in parts:
match = re.findall(r"\[(\S+?)\]", part)
if match:
key = part[: part.find("[")]
result = result[key]
for m in match:
try:
m = int(m)
except ValueError:
m = str(m)
result = result[m]
else:
result = result.get(part)
return result
def number(self, value):
if "." in str(value):
return float(value)
else:
return int(value)
def eq(self, value):
return value == self.value
def neq(self, value):
return value != self.value
def gt(self, value):
return self.number(value) > self.value
def ge(self, value):
return self.number(value) >= self.value
def lt(self, value):
return self.number(value) < self.value
def le(self, value):
return self.number(value) <= self.value
def contains(self, value):
return str(self.value) in value
def matches(self, value):
match = re.search(self.value, value, re.M)
return match is not None
| 10,147 | Python | .py | 255 | 31.129412 | 92 | 0.618311 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,344 | utils.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Networking tools for network modules only
from __future__ import annotations
import re
import ast
import operator
import socket
import json
from itertools import chain
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.module_utils.six.moves.collections_abc import Mapping
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils import basic
from ansible.module_utils.parsing.convert_bool import boolean
try:
from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError
HAS_JINJA2 = True
except ImportError:
HAS_JINJA2 = False
OPERATORS = frozenset(["ge", "gt", "eq", "neq", "lt", "le"])
ALIASES = frozenset(
[("min", "ge"), ("max", "le"), ("exactly", "eq"), ("neq", "ne")]
)
def to_list(val):
if isinstance(val, (list, tuple, set)):
return list(val)
elif val is not None:
return [val]
else:
return list()
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = to_text(item).split("\n")
yield item
def transform_commands(module):
transform = ComplexList(
dict(
command=dict(key=True),
output=dict(),
prompt=dict(type="list"),
answer=dict(type="list"),
newline=dict(type="bool", default=True),
sendonly=dict(type="bool", default=False),
check_all=dict(type="bool", default=False),
),
module,
)
return transform(module.params["commands"])
def sort_list(val):
if isinstance(val, list):
return sorted(val)
return val
class Entity(object):
"""Transforms a dict to with an argument spec
This class will take a dict and apply an Ansible argument spec to the
values. The resulting dict will contain all of the keys in the param
with appropriate values set.
Example::
argument_spec = dict(
command=dict(key=True),
display=dict(default='text', choices=['text', 'json']),
validate=dict(type='bool')
)
transform = Entity(module, argument_spec)
value = dict(command='foo')
result = transform(value)
print(result)
{'command': 'foo', 'display': 'text', 'validate': None}
Supported argument spec:
* key - specifies how to map a single value to a dict
* read_from - read and apply the argument_spec from the module
* required - a value is required
* type - type of value (uses AnsibleModule type checker)
* fallback - implements fallback function
* choices - set of valid options
* default - default value
"""
def __init__(
self, module, attrs=None, args=None, keys=None, from_argspec=False
):
args = [] if args is None else args
self._attributes = attrs or {}
self._module = module
for arg in args:
self._attributes[arg] = dict()
if from_argspec:
self._attributes[arg]["read_from"] = arg
if keys and arg in keys:
self._attributes[arg]["key"] = True
self.attr_names = frozenset(self._attributes.keys())
_has_key = False
for name, attr in iteritems(self._attributes):
if attr.get("read_from"):
if attr["read_from"] not in self._module.argument_spec:
module.fail_json(
msg="argument %s does not exist" % attr["read_from"]
)
spec = self._module.argument_spec.get(attr["read_from"])
for key, value in iteritems(spec):
if key not in attr:
attr[key] = value
if attr.get("key"):
if _has_key:
module.fail_json(msg="only one key value can be specified")
_has_key = True
attr["required"] = True
def serialize(self):
return self._attributes
def to_dict(self, value):
obj = {}
for name, attr in iteritems(self._attributes):
if attr.get("key"):
obj[name] = value
else:
obj[name] = attr.get("default")
return obj
def __call__(self, value, strict=True):
if not isinstance(value, dict):
value = self.to_dict(value)
if strict:
unknown = set(value).difference(self.attr_names)
if unknown:
self._module.fail_json(
msg="invalid keys: %s" % ",".join(unknown)
)
for name, attr in iteritems(self._attributes):
if value.get(name) is None:
value[name] = attr.get("default")
if attr.get("fallback") and not value.get(name):
fallback = attr.get("fallback", (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
value[name] = fallback_strategy(
*fallback_args, **fallback_kwargs
)
except basic.AnsibleFallbackNotFound:
continue
if attr.get("required") and value.get(name) is None:
self._module.fail_json(
msg="missing required attribute %s" % name
)
if "choices" in attr:
if value[name] not in attr["choices"]:
self._module.fail_json(
msg="%s must be one of %s, got %s"
% (name, ", ".join(attr["choices"]), value[name])
)
if value[name] is not None:
value_type = attr.get("type", "str")
type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[
value_type
]
type_checker(value[name])
elif value.get(name):
value[name] = self._module.params[name]
return value
class EntityCollection(Entity):
"""Extends ```Entity``` to handle a list of dicts """
def __call__(self, iterable, strict=True):
if iterable is None:
iterable = [
super(EntityCollection, self).__call__(
self._module.params, strict
)
]
if not isinstance(iterable, (list, tuple)):
self._module.fail_json(msg="value must be an iterable")
return [
(super(EntityCollection, self).__call__(i, strict))
for i in iterable
]
# these two are for backwards compatibility and can be removed once all of the
# modules that use them are updated
class ComplexDict(Entity):
def __init__(self, attrs, module, *args, **kwargs):
super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
class ComplexList(EntityCollection):
def __init__(self, attrs, module, *args, **kwargs):
super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
def dict_diff(base, comparable):
""" Generate a dict object of differences
This function will compare two dict objects and return the difference
between them as a dict object. For scalar values, the key will reflect
the updated value. If the key does not exist in `comparable`, then no
key will be returned. For lists, the value in comparable will wholly replace
the value in base for the key. For dicts, the returned value will only
return keys that are different.
:param base: dict object to base the diff on
:param comparable: dict object to compare against base
:returns: new dict object with differences
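Illustrative example (hypothetical values)::
    base = {"mtu": 1500, "speed": "auto", "vlans": [1, 2]}
    comparable = {"mtu": 9000, "speed": "auto", "vlans": [1, 2, 3]}
    dict_diff(base, comparable)
    # -> {'mtu': 9000, 'vlans': [1, 2, 3]}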
"""
if not isinstance(base, dict):
raise AssertionError("`base` must be of type <dict>")
if not isinstance(comparable, dict):
if comparable is None:
comparable = dict()
else:
raise AssertionError("`comparable` must be of type <dict>")
updates = dict()
for key, value in iteritems(base):
if isinstance(value, dict):
item = comparable.get(key)
if item is not None:
sub_diff = dict_diff(value, comparable[key])
if sub_diff:
updates[key] = sub_diff
else:
comparable_value = comparable.get(key)
if comparable_value is not None:
if sort_list(base[key]) != sort_list(comparable_value):
updates[key] = comparable_value
for key in set(comparable.keys()).difference(base.keys()):
updates[key] = comparable.get(key)
return updates
def dict_merge(base, other):
""" Return a new dict object that combines base and other
This will create a new dict object that is a combination of the key/value
pairs from base and other. When both keys exist, the value will be
selected from other. If the value is a list object, the two lists will
be combined and duplicate entries removed.
:param base: dict object to serve as base
:param other: dict object to combine with base
:returns: new combined dict object
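Illustrative example (hypothetical values; the order of merged list entries is not guaranteed)::
    base = {"hostname": "rtr1", "vlans": [1, 2]}
    other = {"domain": "example.com", "vlans": [2, 3]}
    dict_merge(base, other)
    # -> {'hostname': 'rtr1', 'vlans': [1, 2, 3], 'domain': 'example.com'}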
"""
if not isinstance(base, dict):
raise AssertionError("`base` must be of type <dict>")
if not isinstance(other, dict):
raise AssertionError("`other` must be of type <dict>")
combined = dict()
for key, value in iteritems(base):
if isinstance(value, dict):
if key in other:
item = other.get(key)
if item is not None:
if isinstance(other[key], Mapping):
combined[key] = dict_merge(value, other[key])
else:
combined[key] = other[key]
else:
combined[key] = item
else:
combined[key] = value
elif isinstance(value, list):
if key in other:
item = other.get(key)
if item is not None:
try:
combined[key] = list(set(chain(value, item)))
except TypeError:
value.extend([i for i in item if i not in value])
combined[key] = value
else:
combined[key] = item
else:
combined[key] = value
else:
if key in other:
other_value = other.get(key)
if other_value is not None:
if sort_list(base[key]) != sort_list(other_value):
combined[key] = other_value
else:
combined[key] = value
else:
combined[key] = other_value
else:
combined[key] = value
for key in set(other.keys()).difference(base.keys()):
combined[key] = other.get(key)
return combined
def param_list_to_dict(param_list, unique_key="name", remove_key=True):
"""Rotates a list of dictionaries to be a dictionary of dictionaries.
:param param_list: The aforementioned list of dictionaries
:param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value
behind this key will be the key each dictionary can be found at in the new root dictionary
:param remove_key: If True, remove unique_key from the individual dictionaries before returning.
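Illustrative example (hypothetical interface parameters)::
    interfaces = [{"name": "Gi0/1", "mtu": 1500}, {"name": "Gi0/2", "mtu": 9000}]
    param_list_to_dict(interfaces)
    # -> {'Gi0/1': {'mtu': 1500}, 'Gi0/2': {'mtu': 9000}}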
"""
param_dict = {}
for params in param_list:
params = params.copy()
if remove_key:
name = params.pop(unique_key)
else:
name = params.get(unique_key)
param_dict[name] = params
return param_dict
def conditional(expr, val, cast=None):
match = re.match(r"^(.+)\((.+)\)$", str(expr), re.I)
if match:
op, arg = match.groups()
else:
op = "eq"
if " " in str(expr):
raise AssertionError("invalid expression: cannot contain spaces")
arg = expr
if cast is None and val is not None:
arg = type(val)(arg)
elif callable(cast):
arg = cast(arg)
val = cast(val)
op = next((oper for alias, oper in ALIASES if op == alias), op)
if not hasattr(operator, op) and op not in OPERATORS:
raise ValueError("unknown operator: %s" % op)
func = getattr(operator, op)
return func(val, arg)
def ternary(value, true_val, false_val):
""" value ? true_val : false_val """
if value:
return true_val
else:
return false_val
def remove_default_spec(spec):
for item in spec:
if "default" in spec[item]:
del spec[item]["default"]
def validate_ip_address(address):
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count(".") == 3
def validate_ip_v6_address(address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
return False
return True
def validate_prefix(prefix):
if prefix and not 0 <= int(prefix) <= 32:
return False
return True
def load_provider(spec, args):
provider = args.get("provider") or {}
for key, value in iteritems(spec):
if key not in provider:
if "fallback" in value:
provider[key] = _fallback(value["fallback"])
elif "default" in value:
provider[key] = value["default"]
else:
provider[key] = None
if "authorize" in provider:
# Coerce authorize to a boolean in case a string has somehow snuck in.
provider["authorize"] = boolean(provider["authorize"] or False)
args["provider"] = provider
return provider
def _fallback(fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except basic.AnsibleFallbackNotFound:
pass
def generate_dict(spec):
"""
Generate dictionary which is in sync with argspec
:param spec: A dictionary that is the argspec of the module
:rtype: A dictionary
:returns: A dictionary in sync with argspec with default value
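Illustrative example (hypothetical argspec)::
    spec = {"name": {"type": "str"}, "mtu": {"type": "int", "default": 1500}}
    generate_dict(spec)
    # -> {'name': None, 'mtu': 1500}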
"""
obj = {}
if not spec:
return obj
for key, val in iteritems(spec):
if "default" in val:
dct = {key: val["default"]}
elif "type" in val and val["type"] == "dict":
dct = {key: generate_dict(val["options"])}
else:
dct = {key: None}
obj.update(dct)
return obj
def parse_conf_arg(cfg, arg):
"""
Parse config based on argument
:param cfg: A text string which is a line of configuration.
:param arg: A text string which is to be matched.
:rtype: A text string
:returns: A text string if match is found
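Illustrative example (hypothetical config text)::
    cfg = "interface GigabitEthernet0/1\n description uplink\n"
    parse_conf_arg(cfg, "description")
    # -> 'uplink'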
"""
match = re.search(r"%s (.+)(\n|$)" % arg, cfg, re.M)
if match:
result = match.group(1).strip()
else:
result = None
return result
def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str="no"):
"""
Parse config based on command
:param cfg: A text string which is a line of configuration.
:param cmd: A text string which is the command to be matched
:param res1: A text string to be returned if the command is present
:param res2: A text string to be returned if the negate command
is present
:param delete_str: A text string to identify the start of the
negate command
:rtype: A text string
:returns: A text string if match is found
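Illustrative example (hypothetical config text)::
    cfg = "interface GigabitEthernet0/1\n   shutdown\n"
    parse_conf_cmd_arg(cfg, "shutdown", "disabled", res2="enabled")
    # -> 'disabled'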
"""
match = re.search(r"\n\s+%s(\n|$)" % cmd, cfg)
if match:
return res1
if res2 is not None:
match = re.search(r"\n\s+%s %s(\n|$)" % (delete_str, cmd), cfg)
if match:
return res2
return None
def get_xml_conf_arg(cfg, path, data="text"):
"""
:param cfg: The top level configuration lxml Element tree object
:param path: The relative xpath w.r.t to top level element (cfg)
to be searched in the xml hierarchy
:param data: The type of data to be returned for the matched xml node.
Valid values are text, tag, attrib, with default as text.
:return: Returns the required type for the matched xml node or else None
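Illustrative example (assumes ``cfg`` is an lxml element, e.g. built with lxml.etree.fromstring)::
    cfg = fromstring("<config><mtu>1500</mtu></config>")
    get_xml_conf_arg(cfg, "mtu")
    # -> '1500'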
"""
match = cfg.xpath(path)
if len(match):
if data == "tag":
result = getattr(match[0], "tag")
elif data == "attrib":
result = getattr(match[0], "attrib")
else:
result = getattr(match[0], "text")
else:
result = None
return result
def remove_empties(cfg_dict):
"""
Generate final config dictionary
:param cfg_dict: A dictionary parsed in the facts system
:rtype: A dictionary
:returns: A dictionary by eliminating keys that have null values
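Illustrative example (hypothetical facts values; note that 0 is kept, while None and empty containers are dropped)::
    remove_empties({"name": "eth0", "description": None, "members": [], "mtu": 0})
    # -> {'name': 'eth0', 'mtu': 0}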
"""
final_cfg = {}
if not cfg_dict:
return final_cfg
for key, val in iteritems(cfg_dict):
dct = None
if isinstance(val, dict):
child_val = remove_empties(val)
if child_val:
dct = {key: child_val}
elif (
isinstance(val, list)
and val
and all(isinstance(x, dict) for x in val)
):
child_val = [remove_empties(x) for x in val]
if child_val:
dct = {key: child_val}
elif val not in [None, [], {}, (), ""]:
dct = {key: val}
if dct:
final_cfg.update(dct)
return final_cfg
def validate_config(spec, data):
"""
Validate the input data against the AnsibleModule spec format
:param spec: Ansible argument spec
:param data: Data to be validated
:return:
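Illustrative sketch (hypothetical spec; assumes AnsibleModule fills in defaults as usual)::
    spec = dict(name=dict(type="str"), mtu=dict(type="int", default=1500))
    validate_config(spec, {"name": "eth0"})
    # -> {'name': 'eth0', 'mtu': 1500}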
"""
params = basic._ANSIBLE_ARGS
basic._ANSIBLE_ARGS = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": data}))
validated_data = basic.AnsibleModule(spec).params
basic._ANSIBLE_ARGS = params
return validated_data
def search_obj_in_list(name, lst, key="name"):
if not lst:
return None
else:
for item in lst:
if item.get(key) == name:
return item
class Template:
def __init__(self):
if not HAS_JINJA2:
raise ImportError(
"jinja2 is required but does not appear to be installed. "
"It can be installed using `pip install jinja2`"
)
self.env = Environment(undefined=StrictUndefined)
self.env.filters.update({"ternary": ternary})
def __call__(self, value, variables=None, fail_on_undefined=True):
variables = variables or {}
if not self.contains_vars(value):
return value
try:
value = self.env.from_string(value).render(variables)
except UndefinedError:
if not fail_on_undefined:
return None
raise
if value:
try:
return ast.literal_eval(value)
except Exception:
return str(value)
else:
return None
def contains_vars(self, data):
if isinstance(data, string_types):
for marker in (
self.env.block_start_string,
self.env.variable_start_string,
self.env.comment_start_string,
):
if marker in data:
return True
return False
| 21,634 | Python | .py | 553 | 29.65642 | 115 | 0.594676 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,345 | netconf.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations
import sys
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
try:
from ncclient.xml_ import NCElement, new_ele, sub_ele
HAS_NCCLIENT = True
except (ImportError, AttributeError):
HAS_NCCLIENT = False
try:
from lxml.etree import Element, fromstring, XMLSyntaxError
except ImportError:
from xml.etree.ElementTree import Element, fromstring
if sys.version_info < (2, 7):
from xml.parsers.expat import ExpatError as XMLSyntaxError
else:
from xml.etree.ElementTree import ParseError as XMLSyntaxError
NS_MAP = {"nc": "urn:ietf:params:xml:ns:netconf:base:1.0"}
def exec_rpc(module, *args, **kwargs):
connection = NetconfConnection(module._socket_path)
return connection.execute_rpc(*args, **kwargs)
class NetconfConnection(Connection):
def __init__(self, socket_path):
super(NetconfConnection, self).__init__(socket_path)
def __rpc__(self, name, *args, **kwargs):
"""Executes the json-rpc and returns the output received
from remote device.
:name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
:args: Ordered list of params passed as arguments to rpc method
:kwargs: Dict of valid key, value pairs passed as arguments to rpc method
For usage refer the respective connection plugin docs.
"""
self.check_rc = kwargs.pop("check_rc", True)
self.ignore_warning = kwargs.pop("ignore_warning", True)
response = self._exec_jsonrpc(name, *args, **kwargs)
if "error" in response:
rpc_error = response["error"].get("data")
return self.parse_rpc_error(
to_bytes(rpc_error, errors="surrogate_then_replace")
)
return fromstring(
to_bytes(response["result"], errors="surrogate_then_replace")
)
def parse_rpc_error(self, rpc_error):
if self.check_rc:
try:
error_root = fromstring(rpc_error)
root = Element("root")
root.append(error_root)
error_list = root.findall(".//nc:rpc-error", NS_MAP)
if not error_list:
raise ConnectionError(
to_text(rpc_error, errors="surrogate_then_replace")
)
warnings = []
for error in error_list:
message_ele = error.find("./nc:error-message", NS_MAP)
if message_ele is None:
message_ele = error.find("./nc:error-info", NS_MAP)
message = (
message_ele.text if message_ele is not None else None
)
severity = error.find("./nc:error-severity", NS_MAP).text
if (
severity == "warning"
and self.ignore_warning
and message is not None
):
warnings.append(message)
else:
raise ConnectionError(
to_text(rpc_error, errors="surrogate_then_replace")
)
return warnings
except XMLSyntaxError:
raise ConnectionError(rpc_error)
def transform_reply():
return b"""<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/|comment()|processing-instruction()">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:attribute name="{local-name()}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
"""
# Note: Workaround for ncclient 0.5.3
def remove_namespaces(data):
if not HAS_NCCLIENT:
raise ImportError(
"ncclient is required but does not appear to be installed. "
"It can be installed using `pip install ncclient`"
)
return NCElement(data, transform_reply()).data_xml
def build_root_xml_node(tag):
return new_ele(tag)
def build_child_xml_node(parent, tag, text=None, attrib=None):
element = sub_ele(parent, tag)
if text:
element.text = to_text(text)
if attrib:
element.attrib.update(attrib)
return element
def build_subtree(parent, path):
element = parent
for field in path.split("/"):
sub_element = build_child_xml_node(element, field)
element = sub_element
return element
| 6,602 | Python | .py | 146 | 36.369863 | 94 | 0.643046 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,346 | facts.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py |
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The facts base class
this contains methods common to all facts subsets
"""
from __future__ import annotations
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import (
get_resource_connection,
)
from ansible.module_utils.six import iteritems
class FactsBase(object):
"""
The facts base class
"""
def __init__(self, module):
self._module = module
self._warnings = []
self._gather_subset = module.params.get("gather_subset")
self._gather_network_resources = module.params.get(
"gather_network_resources"
)
self._connection = None
if module.params.get("state") not in ["rendered", "parsed"]:
self._connection = get_resource_connection(module)
self.ansible_facts = {"ansible_network_resources": {}}
self.ansible_facts["ansible_net_gather_network_resources"] = list()
self.ansible_facts["ansible_net_gather_subset"] = list()
if not self._gather_subset:
self._gather_subset = ["!config"]
if not self._gather_network_resources:
self._gather_network_resources = ["!all"]
def gen_runable(self, subsets, valid_subsets, resource_facts=False):
""" Generate the runable subset
:param module: The module instance
:param subsets: The provided subsets
:param valid_subsets: The valid subsets
:param resource_facts: A boolean flag
:rtype: list
:returns: The runable subsets
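Illustrative example (hypothetical subsets)::
    self.gen_runable(["!config"], frozenset(["default", "config"]))
    # -> {'default'}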
"""
runable_subsets = set()
exclude_subsets = set()
minimal_gather_subset = set()
if not resource_facts:
minimal_gather_subset = frozenset(["default"])
for subset in subsets:
if subset == "all":
runable_subsets.update(valid_subsets)
continue
if subset == "min" and minimal_gather_subset:
runable_subsets.update(minimal_gather_subset)
continue
if subset.startswith("!"):
subset = subset[1:]
if subset == "min":
exclude_subsets.update(minimal_gather_subset)
continue
if subset == "all":
exclude_subsets.update(
valid_subsets - minimal_gather_subset
)
continue
exclude = True
else:
exclude = False
if subset not in valid_subsets:
self._module.fail_json(
msg="Subset must be one of [%s], got %s"
% (
", ".join(sorted(list(valid_subsets))),
subset,
)
)
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(valid_subsets)
runable_subsets.difference_update(exclude_subsets)
return runable_subsets
def get_network_resources_facts(
self, facts_resource_obj_map, resource_facts_type=None, data=None
):
"""
:param facts_resource_obj_map: mapping of network resource subset names to their facts classes
:param resource_facts_type: requested network resource subsets (defaults to gather_network_resources)
:param data: previously collected configuration
:return:
"""
if not resource_facts_type:
resource_facts_type = self._gather_network_resources
restorun_subsets = self.gen_runable(
resource_facts_type,
frozenset(facts_resource_obj_map.keys()),
resource_facts=True,
)
if restorun_subsets:
self.ansible_facts["ansible_net_gather_network_resources"] = list(
restorun_subsets
)
instances = list()
for key in restorun_subsets:
fact_cls_obj = facts_resource_obj_map.get(key)
if fact_cls_obj:
instances.append(fact_cls_obj(self._module))
else:
self._warnings.extend(
[
"network resource fact gathering for '%s' is not supported"
% key
]
)
for inst in instances:
inst.populate_facts(self._connection, self.ansible_facts, data)
def get_network_legacy_facts(
self, fact_legacy_obj_map, legacy_facts_type=None
):
if not legacy_facts_type:
legacy_facts_type = self._gather_subset
runable_subsets = self.gen_runable(
legacy_facts_type, frozenset(fact_legacy_obj_map.keys())
)
if runable_subsets:
facts = dict()
# default subset should always be returned with legacy facts subsets
if "default" not in runable_subsets:
runable_subsets.add("default")
self.ansible_facts["ansible_net_gather_subset"] = list(
runable_subsets
)
instances = list()
for key in runable_subsets:
instances.append(fact_legacy_obj_map[key](self._module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
self._warnings.extend(inst.warnings)
for key, value in iteritems(facts):
key = "ansible_net_%s" % key
self.ansible_facts[key] = value
| 5,660 | Python | .py | 144 | 27.111111 | 95 | 0.555677 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,347 | base.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py |
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The base class for all resource modules
"""
from __future__ import annotations
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import (
get_resource_connection,
)
class ConfigBase(object):
""" The base class for all resource modules
"""
ACTION_STATES = ["merged", "replaced", "overridden", "deleted"]
def __init__(self, module):
self._module = module
self.state = module.params["state"]
self._connection = None
if self.state not in ["rendered", "parsed"]:
self._connection = get_resource_connection(module)
| 766 | Python | .py | 22 | 30.636364 | 95 | 0.684282 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
13,348 | ipaddress.py | ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py |
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file, and this file only, is based on
# Lib/ipaddress.py of cpython
# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
# are retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import annotations
import itertools
import struct
# The following makes it easier for us to script updates of the bundled code and is not part of
# upstream
_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"}
__version__ = "1.0.22"
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b"\0"[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b"!B", b)[0] for b in byt]
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == "big"
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == "big"
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b"!I", intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b"!QQ", intval >> 64, intval & 0xFFFFFFFFFFFFFFFF)
else:
raise NotImplementedError()
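# Illustrative self-check (added for documentation, not part of the upstream
# bundle): the compat helpers above should round-trip with the stdlib struct
# module. Defined as a function so importing the module never executes it.
def _example_compat_packing_roundtrip():
    # 192.0.2.1 == 0xC0000201; big-endian 4-byte packing matches struct.
    assert _compat_to_bytes(0xC0000201, 4, "big") == struct.pack(b"!I", 0xC0000201)
    # Splitting into byte values and re-accumulating restores the integer.
    vals = _compat_bytes_to_byte_vals(_compat_to_bytes(0xC0000201, 4, "big"))
    assert _compat_int_from_byte_vals(vals, "big") == 0xC0000201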
if hasattr(int, "bit_length"):
# Not int.bit_length, since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
"%r does not appear to be an IPv4 or IPv6 address. "
"Did you pass in a bytes (str in Python 2) instead of"
" a unicode object?" % address
)
raise ValueError(
"%r does not appear to be an IPv4 or IPv6 address" % address
)
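# Usage sketch (added for illustration; mirrors the stdlib ipaddress API that
# this bundle re-implements). IPv4Address/IPv6Address are defined further down
# in this module, and the function is never executed at import time.
def _example_ip_address_factory():
    addr4 = ip_address("192.0.2.1")    # -> IPv4Address('192.0.2.1')
    addr6 = ip_address("2001:db8::1")  # -> IPv6Address('2001:db8::1')
    # Integers below 2**32 are interpreted as IPv4 by default.
    assert ip_address(1) == ip_address("0.0.0.1")
    return addr4, addr6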
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
"%r does not appear to be an IPv4 or IPv6 network. "
"Did you pass in a bytes (str in Python 2) instead of"
" a unicode object?" % address
)
raise ValueError(
"%r does not appear to be an IPv4 or IPv6 network" % address
)
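# Usage sketch (illustrative, not part of upstream): the strict flag controls
# whether host bits are rejected or silently masked away.
def _example_ip_network_factory():
    net = ip_network("192.0.2.0/24")                   # exact network address
    masked = ip_network("192.0.2.1/24", strict=False)  # host bits masked off
    assert masked == net
    try:
        ip_network("192.0.2.1/24")  # strict=True (the default) rejects host bits
    except ValueError:
        pass
    return net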
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError(
"%r does not appear to be an IPv4 or IPv6 interface" % address
)
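# Usage sketch (illustrative, not part of upstream): an interface couples an
# address with the network it lives on.
def _example_ip_interface_factory():
    iface = ip_interface("192.0.2.5/24")
    assert iface.ip == ip_address("192.0.2.5")
    assert iface.network == ip_network("192.0.2.0/24")
    return iface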
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, "big")
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, "big")
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split("/")
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it) # pylint: disable=stop-iteration-return
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, _compat_bit_length(~number & (number - 1)))
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if not (
isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress)
):
raise TypeError("first and last must be IP addresses, not networks")
if first.version != last.version:
raise TypeError(
"%s and %s are not of the same version" % (first, last)
)
if first > last:
raise ValueError("last IP address must be greater than first")
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError("unknown IP version")
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(
_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1,
)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
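# Usage sketch (illustrative, not part of upstream): an arbitrary inclusive
# address range is expressed as the minimal list of CIDR blocks, matching the
# doctest shown in the docstring above.
def _example_summarize_range():
    nets = list(
        summarize_address_range(
            IPv4Address("192.0.2.0"), IPv4Address("192.0.2.130")
        )
    )
    assert nets == [
        IPv4Network("192.0.2.0/25"),
        IPv4Network("192.0.2.128/31"),
        IPv4Network("192.0.2.130/32"),
    ]
    return nets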
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError(
"%s and %s are not of the same version" % (ip, ips[-1])
)
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError(
"%s and %s are not of the same version" % (ip, ips[-1])
)
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError(
"%s and %s are not of the same version" % (ip, nets[-1])
)
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
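# Usage sketch (illustrative, not part of upstream): two adjacent /25 networks
# collapse into their common /24 supernet.
def _example_collapse_addresses():
    collapsed = list(
        collapse_addresses(
            [IPv4Network("192.0.2.0/25"), IPv4Network("192.0.2.128/25")]
        )
    )
    assert collapsed == [IPv4Network("192.0.2.0/24")]
    return collapsed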
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
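# Usage sketch (illustrative, not part of upstream): mixed lists of addresses
# and networks are not directly orderable, but can be sorted with this key.
def _example_mixed_type_sort():
    mixed = [IPv4Network("192.0.2.0/24"), IPv4Address("192.0.2.1")]
    return sorted(mixed, key=get_mixed_type_key)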
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = "%200s has no version specified" % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(
msg % (address, self._max_prefixlen, self._version)
)
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
"%r (len %d != %d) is not permitted as an IPv%d address. "
"Did you pass in a bytes (str in Python 2) instead of"
" a unicode object?"
)
raise AddressValueError(
msg % (address, address_len, expected_len, self._version)
)
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(
ip_int, cls._max_prefixlen
)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, "big")
msg = "Netmask pattern %r mixes zeroes & ones"
raise ValueError(msg % details)
return prefixlen
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = "%r is not a valid netmask" % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return self._ip == other._ip and self._version == other._version
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError(
"%s and %s are not of the same type" % (self, other)
)
if self._version != other._version:
raise TypeError(
"%s and %s are not of the same version" % (self, other)
)
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return "%s/%d" % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError("address out of range")
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError("address out of range")
return self._address_class(broadcast + n)
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError(
"%s and %s are not of the same type" % (self, other)
)
if self._version != other._version:
raise TypeError(
"%s and %s are not of the same version" % (self, other)
)
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (
self._version == other._version
and self.network_address == other.network_address
and int(self.netmask) == int(other.netmask)
)
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (
int(self.network_address)
<= int(other._ip)
<= int(self.broadcast_address)
)
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other
or (
other.network_address in self
or (other.broadcast_address in self)
)
)
@property
def broadcast_address(self):
x = self._cache.get("broadcast_address")
if x is None:
x = self._address_class(
int(self.network_address) | int(self.hostmask)
)
self._cache["broadcast_address"] = x
return x
@property
def hostmask(self):
x = self._cache.get("hostmask")
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache["hostmask"] = x
return x
@property
def with_prefixlen(self):
return "%s/%d" % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return "%s/%s" % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return "%s/%s" % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = "%200s has no associated address class" % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
list(addr1.address_exclude(addr2)) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
list(addr1.address_exclude(addr2)) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError(
"%s and %s are not of the same version" % (self, other)
)
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError("%s not contained in %s" % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__(
"%s/%s" % (other.network_address, other.prefixlen)
)
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError(
"Error performing exclusion: "
"s1: %s s2: %s other: %s" % (s1, s2, other)
)
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError(
"Error performing exclusion: "
"s1: %s s2: %s other: %s" % (s1, s2, other)
)
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError(
"%s and %s are not of the same type" % (self, other)
)
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError("new prefix must be longer")
if prefixlen_diff != 1:
raise ValueError("cannot set prefixlen_diff and new_prefix")
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError("prefix length diff must be > 0")
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
"prefix length diff %d is invalid for netblock %s"
% (new_prefixlen, self)
)
start = int(self.network_address)
end = int(self.broadcast_address) + 1
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError("new prefix must be shorter")
if prefixlen_diff != 1:
raise ValueError("cannot set prefixlen_diff and new_prefix")
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
"current prefixlen is %d, cannot have a prefixlen_diff of %d"
% (self.prefixlen, prefixlen_diff)
)
return self.__class__(
(
int(self.network_address)
& (int(self.netmask) << prefixlen_diff),
new_prefixlen,
)
)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (
self.network_address.is_multicast
and self.broadcast_address.is_multicast
)
@staticmethod
def _is_subnet_of(a, b):
try:
# Always false if one is v4 and the other is v6.
if a._version != b._version:
raise TypeError(
"%s and %s are not of the same version" % (a, b)
)
return (
b.network_address <= a.network_address
and b.broadcast_address >= a.broadcast_address
)
except AttributeError:
raise TypeError(
"Unable to test subnet containment "
"between %s and %s" % (a, b)
)
def subnet_of(self, other):
"""Return True if this network is a subnet of other."""
return self._is_subnet_of(self, other)
def supernet_of(self, other):
"""Return True if this network is a supernet of other."""
return self._is_subnet_of(other, self)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (
self.network_address.is_reserved
and self.broadcast_address.is_reserved
)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (
self.network_address.is_link_local
and self.broadcast_address.is_link_local
)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (
self.network_address.is_private
and self.broadcast_address.is_private
)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (
self.network_address.is_unspecified
and self.broadcast_address.is_unspecified
)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (
self.network_address.is_loopback
and self.broadcast_address.is_loopback
)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset("0123456789")
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError("Address cannot be empty")
octets = ip_str.split(".")
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), "big"
)
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == "0":
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return ".".join(
_compat_str(
struct.unpack(b"!B", b)[0] if isinstance(b, bytes) else b
)
for b in _compat_to_bytes(ip_int, 4, "big")
)
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split(".")
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split(".")[::-1]
return ".".join(reverse_octets) + ".in-addr.arpa"
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ("_ip", "__weakref__")
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, "big")
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if "/" in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
return (
self not in self._constants._public_network and not self.is_private
)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
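# Illustrative sketch (not part of upstream): the classification properties on
# IPv4Address above are simple membership tests against the constant networks
# registered later in this file via ``IPv4Address._constants``.
def _example_ipv4_classification():
    assert IPv4Address("10.1.2.3").is_private
    assert IPv4Address("127.0.0.1").is_loopback
    assert IPv4Address("224.0.0.1").is_multicast
    assert IPv4Address("169.254.1.1").is_link_local
    assert not IPv4Address("8.8.8.8").is_private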
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return "%s/%d" % (
self._string_from_ip_int(self._ip),
self.network.prefixlen,
)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (
self.network < other.network
or self.network == other.network
and address_less
)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
@property
def with_netmask(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
@property
def with_hostmask(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
.broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen
)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError("%s has host bits set" % self)
else:
self.network_address = IPv4Address(
packed & int(self.netmask)
)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (
IPv4Address(int(self.network_address) & int(self.netmask))
!= self.network_address
):
raise ValueError("%s has host bits set" % self)
self.network_address = IPv4Address(
int(self.network_address) & int(self.netmask)
)
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (
not (
self.network_address in IPv4Network("100.64.0.0/10")
and self.broadcast_address in IPv4Network("100.64.0.0/10")
)
and not self.is_private
)
class _IPv4Constants(object):
_linklocal_network = IPv4Network("169.254.0.0/16")
_loopback_network = IPv4Network("127.0.0.0/8")
_multicast_network = IPv4Network("224.0.0.0/4")
_public_network = IPv4Network("100.64.0.0/10")
_private_networks = [
IPv4Network("0.0.0.0/8"),
IPv4Network("10.0.0.0/8"),
IPv4Network("127.0.0.0/8"),
IPv4Network("169.254.0.0/16"),
IPv4Network("172.16.0.0/12"),
IPv4Network("192.0.0.0/29"),
IPv4Network("192.0.0.170/31"),
IPv4Network("192.0.2.0/24"),
IPv4Network("192.168.0.0/16"),
IPv4Network("198.18.0.0/15"),
IPv4Network("198.51.100.0/24"),
IPv4Network("203.0.113.0/24"),
IPv4Network("240.0.0.0/4"),
IPv4Network("255.255.255.255/32"),
]
_reserved_network = IPv4Network("240.0.0.0/4")
_unspecified_address = IPv4Address("0.0.0.0")
IPv4Address._constants = _IPv4Constants
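# Usage sketch (illustrative, not part of upstream): a few common IPv4Network
# operations provided by the classes defined above.
def _example_ipv4_network_operations():
    net = IPv4Network("192.0.2.0/28")
    assert net.num_addresses == 16
    assert IPv4Address("192.0.2.5") in net
    halves = list(net.subnets())  # the two /29 halves of the /28
    assert halves == [IPv4Network("192.0.2.0/29"), IPv4Network("192.0.2.8/29")]
    assert net.supernet(prefixlen_diff=4) == IPv4Network("192.0.2.0/24")
    return halves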
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset("0123456789ABCDEFabcdef")
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError("Address cannot be empty")
parts = ip_str.split(":")
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if "." in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF))
parts.append("%x" % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1,
ip_str,
)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == "0":
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (
best_doublecolon_start + best_doublecolon_len
)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += [""]
hextets[best_doublecolon_start:best_doublecolon_end] = [""]
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [""] + hextets
return hextets
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError("IPv6 address is too large")
hex_str = "%032x" % ip_int
hextets = ["%x" % int(hex_str[x : x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ":".join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = "%032x" % ip_int
parts = [hex_str[x : x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return "%s/%d" % (":".join(parts), self._prefixlen)
return ":".join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(":", "")
return ".".join(reverse_chars) + ".ip6.arpa"
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ("_ip", "__weakref__")
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, "big")
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if "/" in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (
IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF),
)
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
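# Editor's illustrative sketch (not part of the original backport): how the
# three embedded-IPv4 helpers above behave for well-known example addresses.
#
#     >>> IPv6Address('::ffff:192.0.2.1').ipv4_mapped
#     IPv4Address('192.0.2.1')
#     >>> IPv6Address('2002:c000:201::').sixtofour
#     IPv4Address('192.0.2.1')
#     >>> IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
#     (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))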
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return "%s/%d" % (
self._string_from_ip_int(self._ip),
self.network.prefixlen,
)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (
self.network < other.network
or self.network == other.network
and address_less
)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
@property
def with_netmask(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
@property
def with_hostmask(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 2001:db8::1000/124 and not an
IP address on a network, eg, 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen
)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError("%s has host bits set" % self)
else:
self.network_address = IPv6Address(
packed & int(self.netmask)
)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (
IPv6Address(int(self.network_address) & int(self.netmask))
!= self.network_address
):
raise ValueError("%s has host bits set" % self)
self.network_address = IPv6Address(
int(self.network_address) & int(self.netmask)
)
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (
self.network_address.is_site_local
and self.broadcast_address.is_site_local
)
class _IPv6Constants(object):
_linklocal_network = IPv6Network("fe80::/10")
_multicast_network = IPv6Network("ff00::/8")
_private_networks = [
IPv6Network("::1/128"),
IPv6Network("::/128"),
IPv6Network("::ffff:0:0/96"),
IPv6Network("100::/64"),
IPv6Network("2001::/23"),
IPv6Network("2001:2::/48"),
IPv6Network("2001:db8::/32"),
IPv6Network("2001:10::/28"),
IPv6Network("fc00::/7"),
IPv6Network("fe80::/10"),
]
_reserved_networks = [
IPv6Network("::/8"),
IPv6Network("100::/8"),
IPv6Network("200::/7"),
IPv6Network("400::/6"),
IPv6Network("800::/5"),
IPv6Network("1000::/4"),
IPv6Network("4000::/3"),
IPv6Network("6000::/3"),
IPv6Network("8000::/3"),
IPv6Network("A000::/3"),
IPv6Network("C000::/3"),
IPv6Network("E000::/4"),
IPv6Network("F000::/5"),
IPv6Network("F800::/6"),
IPv6Network("FE00::/9"),
]
_sitelocal_network = IPv6Network("fec0::/10")
IPv6Address._constants = _IPv6Constants
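# Editor's illustrative sketch (not part of the original backport): a small,
# self-contained demonstration of strict vs. non-strict IPv6Network construction
# and of hosts(), which skips the Subnet-Router anycast (network) address.
if __name__ == "__main__":
    net = IPv6Network("2001:db8::/126")
    assert list(net.hosts()) == [
        IPv6Address("2001:db8::1"),
        IPv6Address("2001:db8::2"),
        IPv6Address("2001:db8::3"),
    ]
    # strict=True (the default) rejects host bits; strict=False masks them away.
    try:
        IPv6Network("2001:db8::1/124")
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for host bits with strict=True")
    assert IPv6Network("2001:db8::1/124", strict=False) == IPv6Network("2001:db8::/124")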
| 83,165
|
Python
|
.py
| 2,052
| 30.610136
| 95
| 0.59074
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,349
|
connection_persistent.py
|
ansible_ansible/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/connection_persistent.py
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""
options:
import_modules:
type: boolean
description:
- Reduce CPU usage and network module execution time
by enabling direct execution. Instead of the module being packaged
and executed by the shell, it will be directly executed by the Ansible
control node using the same python interpreter as the Ansible process.
Note- Incompatible with C(asynchronous mode).
Note- Python 3 and Ansible 2.9.16 or greater required.
Note- With Ansible 2.9.x fully qualified modules names are required in tasks.
default: true
ini:
- section: ansible_network
key: import_modules
env:
- name: ANSIBLE_NETWORK_IMPORT_MODULES
vars:
- name: ansible_network_import_modules
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to initially
establish a persistent connection. If this value expires before the connection
to the remote device is completed, the connection will fail.
default: 30
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to
return from the remote device. If this timer is exceeded before the
command returns, the connection plugin will raise an exception and
close.
default: 30
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
persistent_log_messages:
type: boolean
description:
- This flag enables logging of the command executed and the response received from
the target device in the Ansible log file. For this option to work, the 'log_path'
Ansible configuration option must be set to a file path with write access.
- Be sure to fully understand the security implications of enabling this option,
as it could create a security vulnerability by logging sensitive information in the log file.
default: False
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
"""
| 2,695
|
Python
|
.py
| 71
| 32.366197
| 104
| 0.716794
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,350
|
timeout.py
|
ansible_ansible/test/integration/targets/test_utils/scripts/timeout.py
|
#!/usr/bin/env python
from __future__ import annotations
import argparse
import subprocess
import sys
parser = argparse.ArgumentParser()
parser.add_argument('duration', type=int)
parser.add_argument('command', nargs='+')
args = parser.parse_args()
try:
p = subprocess.run(
' '.join(args.command),
shell=True,
timeout=args.duration,
check=False,
)
sys.exit(p.returncode)
except subprocess.TimeoutExpired:
sys.exit(124)
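# Editor's note (illustrative; the wrapped commands are arbitrary examples):
#   ./timeout.py 5 sleep 1    -> the command finishes in time; exits with its return code (0)
#   ./timeout.py 1 sleep 10   -> subprocess.TimeoutExpired is raised; exits 124, like GNU timeout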
| 469
|
Python
|
.py
| 19
| 21
| 41
| 0.709172
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,351
|
core.py
|
ansible_ansible/test/integration/targets/plugin_loader/override/filter_plugins/core.py
|
from __future__ import annotations
def do_flag(myval):
return 'flagged'
class FilterModule(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
# jinja2 overrides
'flag': do_flag,
'flatten': do_flag,
}
| 291
|
Python
|
.py
| 11
| 19.181818
| 39
| 0.578182
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,352
|
custom.py
|
ansible_ansible/test/integration/targets/plugin_loader/file_collision/roles/r2/filter_plugins/custom.py
|
from __future__ import annotations
def do_nothing(myval):
return myval
class FilterModule(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
'filter2': do_nothing,
}
| 233
|
Python
|
.py
| 9
| 19.888889
| 39
| 0.630137
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,353
|
custom.py
|
ansible_ansible/test/integration/targets/plugin_loader/file_collision/roles/r1/filter_plugins/custom.py
|
from __future__ import annotations
def do_nothing(myval):
return myval
class FilterModule(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
'filter1': do_nothing,
'filter3': do_nothing,
}
| 268
|
Python
|
.py
| 10
| 20.1
| 39
| 0.608696
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,354
|
_underscore.py
|
ansible_ansible/test/integration/targets/plugin_loader/normal/library/_underscore.py
|
#!/usr/bin/python
from __future__ import annotations
import json
def main():
print(json.dumps(dict(changed=False, source='legacy_library_dir')))
if __name__ == '__main__':
main()
| 192
|
Python
|
.py
| 7
| 24.571429
| 71
| 0.677778
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,355
|
self_referential.py
|
ansible_ansible/test/integration/targets/plugin_loader/normal/action_plugins/self_referential.py
|
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.plugins.action import ActionBase
import sys
# reference our own module from sys.modules while it's being loaded to ensure the importer behaves properly
try:
mod = sys.modules[__name__]
except KeyError:
raise Exception(f'module {__name__} is not accessible via sys.modules, likely a pluginloader bug')
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
result['changed'] = False
result['msg'] = 'self-referential action loaded and ran successfully'
return result
| 918
|
Python
|
.py
| 20
| 40.7
| 107
| 0.716854
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,356
|
hello.py
|
ansible_ansible/test/integration/targets/ansible-test-integration/ansible_collections/ns/col/plugins/modules/hello.py
|
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
module: hello
short_description: Hello test module
description: Hello test module.
options:
name:
description: Name to say hello to.
type: str
author:
- Ansible Core Team
"""
EXAMPLES = """
- hello:
"""
RETURN = """"""
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.my_util import hello
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str'),
),
)
module.exit_json(**say_hello(module.params['name']))
def say_hello(name):
return dict(
message=hello(name),
)
if __name__ == '__main__':
main()
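# Editor's illustrative sketch: say_hello() simply wraps the hello() helper from
# the collection's module_utils (shown in my_util.py below), e.g.
#   say_hello('World')  ->  {'message': 'Hello World'}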
| 791
|
Python
|
.py
| 33
| 20.424242
| 92
| 0.671582
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,357
|
my_util.py
|
ansible_ansible/test/integration/targets/ansible-test-integration/ansible_collections/ns/col/plugins/module_utils/my_util.py
|
from __future__ import annotations
def hello(name):
return 'Hello %s' % name
| 83
|
Python
|
.py
| 3
| 24.666667
| 34
| 0.705128
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,358
|
test_run_once.py
|
ansible_ansible/test/integration/targets/debugger/test_run_once.py
|
#!/usr/bin/env python
from __future__ import annotations
import io
import os
import sys
import pexpect
env_vars = {
'ANSIBLE_NOCOLOR': 'True',
'ANSIBLE_RETRY_FILES_ENABLED': 'False',
}
env = os.environ.copy()
env.update(env_vars)
with io.BytesIO() as logfile:
debugger_test_test = pexpect.spawn(
'ansible-playbook',
args=['test_run_once_playbook.yml'] + sys.argv[1:],
timeout=10,
env=env
)
debugger_test_test.logfile = logfile
debugger_test_test.expect_exact('TASK: Task 1 (debug)> ')
debugger_test_test.send('task.args["that"] = "true"\r')
debugger_test_test.expect_exact('TASK: Task 1 (debug)> ')
debugger_test_test.send('r\r')
debugger_test_test.expect(pexpect.EOF)
debugger_test_test.close()
assert str(logfile.getvalue()).count('Task 2 executed') == 2
| 846
|
Python
|
.py
| 27
| 27.037037
| 64
| 0.675309
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,359
|
bad_filter.py
|
ansible_ansible/test/integration/targets/jinja_plugins/filter_plugins/bad_filter.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class FilterModule:
def filters(self):
raise TypeError('bad_filter')
| 261
|
Python
|
.py
| 6
| 40
| 92
| 0.730159
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,360
|
good_filter.py
|
ansible_ansible/test/integration/targets/jinja_plugins/filter_plugins/good_filter.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class FilterModule:
def filters(self):
return {
'hello': lambda x: 'Hello, %s!' % x,
}
| 299
|
Python
|
.py
| 8
| 32
| 92
| 0.645833
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,361
|
good_collection_test.py
|
ansible_ansible/test/integration/targets/jinja_plugins/collections/ansible_collections/foo/bar/plugins/test/good_collection_test.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class TestModule:
def tests(self):
return {
'world': lambda x: x.lower() == 'world',
}
| 299
|
Python
|
.py
| 8
| 32
| 92
| 0.645833
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,362
|
bad_collection_test.py
|
ansible_ansible/test/integration/targets/jinja_plugins/collections/ansible_collections/foo/bar/plugins/test/bad_collection_test.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class TestModule:
def tests(self):
raise TypeError('bad_collection_test')
| 266
|
Python
|
.py
| 6
| 40.833333
| 92
| 0.731518
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,363
|
bad_collection_filter.py
|
ansible_ansible/test/integration/targets/jinja_plugins/collections/ansible_collections/foo/bar/plugins/filter/bad_collection_filter.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class FilterModule:
def filters(self):
raise TypeError('bad_collection_filter')
| 272
|
Python
|
.py
| 6
| 41.833333
| 92
| 0.737643
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,364
|
bad_collection_filter2.py
|
ansible_ansible/test/integration/targets/jinja_plugins/collections/ansible_collections/foo/bar/plugins/filter/bad_collection_filter2.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class FilterModule:
pass
| 209
|
Python
|
.py
| 5
| 39.4
| 92
| 0.751244
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,365
|
good_collection_filter.py
|
ansible_ansible/test/integration/targets/jinja_plugins/collections/ansible_collections/foo/bar/plugins/filter/good_collection_filter.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class FilterModule:
def filters(self):
return {
'hello': lambda x: 'Hello, %s!' % x,
}
| 299
|
Python
|
.py
| 8
| 32
| 92
| 0.645833
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,366
|
bad_test.py
|
ansible_ansible/test/integration/targets/jinja_plugins/test_plugins/bad_test.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class TestModule:
def tests(self):
raise TypeError('bad_test')
| 255
|
Python
|
.py
| 6
| 39
| 92
| 0.723577
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,367
|
good_test.py
|
ansible_ansible/test/integration/targets/jinja_plugins/test_plugins/good_test.py
|
# Copyright (c) 2021 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class TestModule:
def tests(self):
return {
'world': lambda x: x.lower() == 'world',
}
| 299
|
Python
|
.py
| 8
| 32
| 92
| 0.645833
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,368
|
helloworld.py
|
ansible_ansible/test/integration/targets/want_json_modules_posix/library/helloworld.py
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# WANT_JSON
from __future__ import annotations
import json
import sys
try:
with open(sys.argv[1], 'r') as f:
data = json.load(f)
except (IOError, OSError, IndexError):
print(json.dumps(dict(msg="No argument file provided", failed=True)))
sys.exit(1)
salutation = data.get('salutation', 'Hello')
name = data.get('name', 'World')
print(json.dumps(dict(msg='%s, %s!' % (salutation, name))))
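# Editor's illustrative sketch: a WANT_JSON module is invoked with the path to a
# JSON args file; e.g. a file containing {"salutation": "Hi", "name": "Ansible"}
# makes this script print {"msg": "Hi, Ansible!"}.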
| 1,083
|
Python
|
.py
| 28
| 36.785714
| 73
| 0.741905
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,369
|
constructed_with_hostvars.py
|
ansible_ansible/test/integration/targets/inventory/inventory_plugins/constructed_with_hostvars.py
|
# Copyright (c) 2022 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: constructed_with_hostvars
options:
plugin:
description: the load name of the plugin
extends_documentation_fragment:
- constructed
"""
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
class InventoryModule(BaseInventoryPlugin, Constructable):
NAME = 'constructed_with_hostvars'
def verify_file(self, path):
return super(InventoryModule, self).verify_file(path) and path.endswith(('constructed.yml', 'constructed.yaml'))
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path, cache)
config = self._read_config_data(path)
strict = self.get_option('strict')
try:
for host in inventory.hosts:
hostvars = {}
# constructed groups based on conditionals
self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict, fetch_hostvars=True)
# constructed groups based on variable values
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict, fetch_hostvars=True)
except Exception as e:
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)), orig_exc=e)
| 1,632
|
Python
|
.py
| 31
| 44.935484
| 131
| 0.699182
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,370
|
unexpected.py
|
ansible_ansible/test/integration/targets/unexpected_executor_exception/action_plugins/unexpected.py
|
from __future__ import annotations
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
raise Exception('boom')
| 222
|
Python
|
.py
| 6
| 32.666667
| 45
| 0.754717
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,371
|
helloworld.py
|
ansible_ansible/test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(argument_spec={})
module.exit_json(msg='Hello, World!')
if __name__ == '__main__':
main()
| 902
|
Python
|
.py
| 22
| 39.136364
| 70
| 0.757159
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,372
|
module_that_logs.py
|
ansible_ansible/test/integration/targets/module_no_log/library/module_that_logs.py
|
#!/usr/bin/python
from __future__ import annotations
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(argument_spec=dict(
number=dict(type='int'),
))
module.log('My number is: (%d)' % module.params['number'])
module.exit_json()
if __name__ == '__main__':
main()
| 335
|
Python
|
.py
| 11
| 26.363636
| 62
| 0.660377
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,373
|
module_that_has_secret.py
|
ansible_ansible/test/integration/targets/module_no_log/library/module_that_has_secret.py
|
#!/usr/bin/python
from __future__ import annotations
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(argument_spec=dict(
secret=dict(no_log=True),
notsecret=dict(no_log=False),
))
msg = "My secret is: (%s), but don't tell %s" % (module.params['secret'], module.params['notsecret'])
module.exit_json(msg=msg, changed=bool(module.params['secret'] == module.params['notsecret']))
if __name__ == '__main__':
main()
| 493
|
Python
|
.py
| 12
| 36.583333
| 105
| 0.669474
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,374
|
create-role-archive.py
|
ansible_ansible/test/integration/targets/ansible-galaxy-role/files/create-role-archive.py
|
#!/usr/bin/env python
"""Create a role archive which overwrites an arbitrary file."""
from __future__ import annotations
import argparse
import os
import pathlib
import tarfile
import tempfile
def main() -> None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('archive', type=pathlib.Path, help='archive to create')
parser.add_argument('content', type=pathlib.Path, help='content to write')
parser.add_argument('target', type=pathlib.Path, help='file to overwrite')
args = parser.parse_args()
create_archive(args.archive, args.content, args.target)
def generate_files_from_path(path):
if os.path.isdir(path):
for subpath in os.listdir(path):
_path = os.path.join(path, subpath)
yield from generate_files_from_path(_path)
elif os.path.isfile(path):
yield pathlib.Path(path)
def create_archive(archive_path: pathlib.Path, content_path: pathlib.Path, target_path: pathlib.Path) -> None:
with (
tarfile.open(name=archive_path, mode='w') as role_archive,
tempfile.TemporaryDirectory() as temp_dir_name,
):
temp_dir_path = pathlib.Path(temp_dir_name)
meta_main_path = temp_dir_path / 'meta' / 'main.yml'
meta_main_path.parent.mkdir()
meta_main_path.write_text('')
symlink_path = temp_dir_path / 'symlink'
symlink_path.symlink_to(target_path)
role_archive.add(meta_main_path)
role_archive.add(symlink_path)
for path in generate_files_from_path(content_path):
if path == content_path:
arcname = str(symlink_path)
else:
arcname = os.path.join(temp_dir_path, path)
content_tarinfo = role_archive.gettarinfo(path, arcname)
with path.open('rb') as file_content:
role_archive.addfile(content_tarinfo, file_content)
if __name__ == '__main__':
main()
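# Editor's illustrative sketch (file names are hypothetical):
#   ./create-role-archive.py bad-role.tar.gz content.txt /tmp/target-file
# produces an archive containing meta/main.yml, a 'symlink' member pointing at
# /tmp/target-file, and the content added under that symlink's archive name,
# so that a naive extraction would overwrite /tmp/target-file.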
| 1,947
|
Python
|
.py
| 45
| 35.955556
| 110
| 0.661718
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,375
|
fix-urls.py
|
ansible_ansible/test/integration/targets/ansible-doc/fix-urls.py
|
"""Unwrap URLs to docs.ansible.com and remove version"""
from __future__ import annotations
import re
import sys
def main():
data = sys.stdin.read()
data = re.sub('(https://docs\\.ansible\\.com/[^ ]+)\n +([^ ]+)\n', '\\1\\2\n', data, flags=re.MULTILINE)
data = re.sub('https://docs\\.ansible\\.com/ansible(|-core)/(?:[^/]+)/', 'https://docs.ansible.com/ansible\\1/devel/', data)
sys.stdout.write(data)
if __name__ == '__main__':
main()
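# Editor's illustrative sketch of the second substitution (example path only):
#   'https://docs.ansible.com/ansible-core/2.17/playbook_guide.html'
# becomes
#   'https://docs.ansible.com/ansible-core/devel/playbook_guide.html'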
| 461
|
Python
|
.py
| 11
| 38.636364
| 128
| 0.604494
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,376
|
other.py
|
ansible_ansible/test/integration/targets/ansible-doc/filter_plugins/other.py
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.utils.display import Display
display = Display()
def donothing(a):
return a
class FilterModule(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
'donothing': donothing,
'nodocs': donothing,
'split': donothing,
'b64decode': donothing,
}
| 484
|
Python
|
.py
| 15
| 25.666667
| 92
| 0.644252
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,377
|
test1.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol3/plugins/modules/test1.py
|
#!/usr/bin/python
from __future__ import annotations
DOCUMENTATION = """
module: test1
short_description: Foo module in testcol3
description:
- This is a foo module.
author:
- me
"""
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
)
module.exit_json()
if __name__ == '__main__':
main()
| 389
|
Python
|
.py
| 18
| 18.388889
| 52
| 0.68595
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,378
|
test2.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol4/plugins/modules/test2.py
|
#!/usr/bin/python
from __future__ import annotations
DOCUMENTATION = """
module: test2
short_description: Foo module in testcol4
description:
- This is a foo module.
author:
- me
"""
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
)
module.exit_json()
if __name__ == '__main__':
main()
| 389
|
Python
|
.py
| 18
| 18.388889
| 52
| 0.68595
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,379
|
module.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol2/plugins/doc_fragments/module.py
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options:
testcol2option:
description:
- An option taken from testcol2
type: str
version_added: 1.0.0
testcol2option2:
description:
- Another option taken from testcol2
type: str
"""
| 468
|
Python
|
.py
| 16
| 23.25
| 92
| 0.645089
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,380
|
version_added.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol2/plugins/doc_fragments/version_added.py
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options: {}
version_added: 1.0.0
"""
| 251
|
Python
|
.py
| 8
| 29.375
| 92
| 0.707113
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,381
|
plugin.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol2/plugins/doc_fragments/plugin.py
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options:
testcol2option:
description:
- A plugin option taken from testcol2
type: str
version_added: 1.0.0
ini:
- key: foo
section: bar
version_added: 1.1.0
deprecated:
alternative: none
why: Test deprecation
version: '3.0.0'
env:
- name: FOO_BAR
version_added: 1.2.0
deprecated:
alternative: none
why: Test deprecation
removed_at_date: 2020-01-31
vars:
- name: foobar
version_added: 1.3.0
deprecated:
alternative: none
why: Test deprecation
removed_at_date: 2040-12-31
testcol2depr:
description:
- A plugin option taken from testcol2 that is deprecated
type: str
deprecated:
alternative: none
why: Test option deprecation
version: '2.0.0'
"""
| 1,276
|
Python
|
.py
| 42
| 19.238095
| 92
| 0.518699
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,382
|
deprecation.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol2/plugins/doc_fragments/deprecation.py
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options: {}
deprecated:
alternative: Use some other module
why: Test deprecation
removed_in: '3.0.0'
"""
| 331
|
Python
|
.py
| 11
| 27.272727
| 92
| 0.705696
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,383
|
statichost.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
inventory: statichost
short_description: Add a single host
description: Add a single host
extends_documentation_fragment:
- inventory_cache
options:
plugin:
description: plugin name (must be statichost)
required: true
hostname:
description: Toggle display of stderr even when script was successful
required: True
"""
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
class InventoryModule(BaseInventoryPlugin, Cacheable):
NAME = 'testns.content_adj.statichost'
def verify_file(self, path):
pass
def parse(self, inventory, loader, path, cache=None):
pass
| 865
|
Python
|
.py
| 24
| 30.583333
| 92
| 0.722356
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,384
|
test_test.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/test/test_test.py
|
from __future__ import annotations
def yolo(value):
return True
class TestModule(object):
""" Ansible core jinja2 tests """
def tests(self):
return {
# failure testing
'yolo': yolo,
}
| 241
|
Python
|
.py
| 10
| 17.4
| 37
| 0.584071
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,385
|
noop_vars_plugin.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py
|
from __future__ import annotations
DOCUMENTATION = """
vars: noop_vars_plugin
short_description: Do NOT load host and group vars
description: don't test loading host and group vars from a collection
options:
stage:
default: all
choices: ['all', 'inventory', 'task']
type: str
ini:
- key: stage
section: testns.testcol.noop_vars_plugin
env:
- name: ANSIBLE_VARS_PLUGIN_STAGE
extends_documentation_fragment:
- testns.testcol2.deprecation
"""
from ansible.plugins.vars import BaseVarsPlugin
class VarsModule(BaseVarsPlugin):
def get_vars(self, loader, path, entities, cache=True):
super(VarsModule, self).get_vars(loader, path, entities)
return {'collection': 'yes', 'notreal': 'value'}
| 815
|
Python
|
.py
| 23
| 28.73913
| 73
| 0.663278
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,386
|
notrealmodule.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py
|
#!/usr/bin/python
from __future__ import annotations
import json
def main():
print(json.dumps(dict(changed=False, source='testns.testcol.notrealmodule')))
if __name__ == '__main__':
main()
| 202
|
Python
|
.py
| 7
| 26
| 81
| 0.694737
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,387
|
randommodule.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/randommodule.py
|
#!/usr/bin/python
from __future__ import annotations
DOCUMENTATION = r"""
---
module: randommodule
short_description: A random module
description:
- A random module.
- See O(foo.bar.baz#role:main:foo=bar) for how this is used in the P(foo.bar.baz#role)'s C(main) entrypoint.
- See L(the docsite,https://docs.ansible.com/ansible-core/devel/) for more information on ansible-core.
- This module is not related to the M(ansible.builtin.copy) module. HORIZONTALLINE You might also be interested
in R(ansible_python_interpreter, ansible_python_interpreter).
- Sometimes you have M(broken markup) that will result in error messages.
author:
- Ansible Core Team
version_added: 1.0.0
deprecated:
alternative: Use some other module
why: Test deprecation
removed_in: '3.0.0'
options:
test:
description: Some text. Consider not using O(ignore:foo=bar).
type: str
version_added: 1.2.0
sub:
description: Suboptions. Contains O(sub.subtest), which can be set to V(123). You can use E(TEST_ENV) to set this.
type: dict
suboptions:
subtest:
description: A suboption. Not compatible to O(ansible.builtin.copy#module:path=c:\\foo\(1\).txt).
type: int
version_added: 1.1.0
# The following is the wrong syntax, and should not get processed
# by add_collection_to_versions_and_dates()
options:
subtest2:
description: Another suboption. Useful when P(ansible.builtin.shuffle#filter) is used with value V([a,b,\),d\\]).
type: float
version_added: 1.1.0
# The following is not supported in modules, and should not get processed
# by add_collection_to_versions_and_dates()
env:
- name: TEST_ENV
version_added: 1.0.0
deprecated:
alternative: none
why: Test deprecation
removed_in: '2.0.0'
version: '2.0.0'
extends_documentation_fragment:
- testns.testcol2.module
seealso:
- module: ansible.builtin.ping
- module: ansible.builtin.uri
description: Use this to fetch a URI
- module: testns.testcol.test
- module: testns.testcol.fakemodule
description: A fake module
- link: https://docs.ansible.com
name: Ansible docsite
description: See also the Ansible docsite.
- ref: foo_bar
description: Some foo bar.
notes:
- This is a note.
- |-
This is a multi-paragraph note.
This is its second paragraph.
This is just another line in the second paragraph.
Eventually this will break into a new line,
depending with which line width this is rendered.
"""
EXAMPLES = """
"""
RETURN = r"""
z_last:
description: A last result.
type: str
returned: success
version_added: 1.3.0
m_middle:
description:
- This should be in the middle.
- Has some more data.
- Check out RV(m_middle.suboption) and compare it to RV(a_first=foo) and RV(community.general.foo#lookup:value).
type: dict
returned: success and 1st of month
contains:
suboption:
description: A suboption.
type: str
choices: [ARF, BARN, c_without_capital_first_letter]
version_added: 1.4.0
a_first:
description: A first result. Use RV(a_first=foo\(bar\\baz\)bam).
type: str
returned: success
"""
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
)
module.exit_json()
if __name__ == '__main__':
main()
| 3,704
|
Python
|
.py
| 107
| 27.747664
| 129
| 0.644711
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,388
|
fakemodule.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py
|
#!/usr/bin/python
from __future__ import annotations
DOCUMENTATION = """
module: fakemodule
short_desciption: fake module
description:
- this is a fake module
version_added: 1.0.0
options:
_notreal:
description: really not a real option
author:
- me
"""
import json
def main():
print(json.dumps(dict(changed=False, source='testns.testcol.fakemodule')))
if __name__ == '__main__':
main()
| 463
|
Python
|
.py
| 19
| 19.421053
| 78
| 0.638444
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,389
|
subdir_module.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/database/database_type/subdir_module.py
|
#!/usr/bin/python
from __future__ import annotations
DOCUMENTATION = """
---
module: subdir_module
short_description: A module in multiple subdirectories
description:
- A module in multiple subdirectories
author:
- Ansible Core Team
version_added: 1.0.0
options: {}
"""
EXAMPLES = """
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
)
module.exit_json()
if __name__ == '__main__':
main()
| 515
|
Python
|
.py
| 25
| 17.88
| 54
| 0.693111
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,390
|
noop.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py
|
# (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
lookup: noop
author: Ansible core team
short_description: returns input
description:
- this is a noop
deprecated:
alternative: Use some other lookup
why: Test deprecation
removed_in: '3.0.0'
extends_documentation_fragment:
- testns.testcol2.version_added
"""
EXAMPLES = """
- name: do nothing
debug: msg="{{ lookup('testns.testcol.noop', [1,2,3,4] }}"
"""
RETURN = """
_list:
description: input given
version_added: 1.0.0
"""
from collections.abc import Sequence
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError
class LookupModule(LookupBase):
def run(self, terms, **kwargs):
if not isinstance(terms, Sequence):
raise AnsibleError("testns.testcol.noop expects a list")
return terms
| 1,010
|
Python
|
.py
| 33
| 26.090909
| 92
| 0.695967
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,391
|
grouped.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/filter/grouped.py
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.utils.display import Display
display = Display()
def nochange(a):
return a
def meaningoflife(a):
return 42
class FilterModule(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
'noop': nochange,
'ultimatequestion': meaningoflife,
'b64decode': nochange, # here to collide with basename of builtin
}
| 540
|
Python
|
.py
| 16
| 27.875
| 92
| 0.675097
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,392
|
in_subdir.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/filter/filter_subdir/in_subdir.py
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.utils.display import Display
display = Display()
def nochange(a):
return a
class FilterModule(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
'noop': nochange,
'nested': nochange,
}
| 408
|
Python
|
.py
| 13
| 25.769231
| 92
| 0.664083
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,393
|
notjsonfile.py
|
ansible_ansible/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py
|
# (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
cache: notjsonfile
short_description: JSON formatted files.
description:
- This cache uses JSON formatted, per host, files saved to the filesystem.
author: Ansible Core (@ansible-core)
version_added: 0.7.0
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the JSON files
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
version_added: 1.2.0
ini:
- key: fact_caching_connection
section: defaults
deprecated:
alternative: none
why: Test deprecation
version: '2.0.0'
_prefix:
description: User defined prefix to use when creating the JSON files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
version_added: 1.1.0
ini:
- key: fact_caching_prefix
section: defaults
deprecated:
alternative: none
why: Another test deprecation
removed_at_date: '2050-01-01'
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
vars:
- name: notsjonfile_fact_caching_timeout
version_added: 1.5.0
deprecated:
alternative: do not use a variable
why: Test deprecation
version: '3.0.0'
type: integer
extends_documentation_fragment:
- testns.testcol2.plugin
"""
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by json files.
"""
pass
| 1,987
|
Python
|
.py
| 62
| 23.225806
| 92
| 0.60625
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,394
|
_deprecated_with_docs.py
|
ansible_ansible/test/integration/targets/ansible-doc/lookup_plugins/_deprecated_with_docs.py
|
# Copyright (c) 2022 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: deprecated_with_docs
short_description: test lookup
description: test lookup
author: Ansible Core Team
version_added: "2.14"
deprecated:
why: reasons
alternative: other thing
removed_in: "2.16"
removed_from_collection: "ansible.legacy"
options: {}
"""
EXAMPLE = """
"""
RETURN = """
"""
| 543
|
Python
|
.py
| 20
| 22.9
| 92
| 0.671815
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,395
|
_deprecated_with_adj_docs.py
|
ansible_ansible/test/integration/targets/ansible-doc/lookup_plugins/_deprecated_with_adj_docs.py
|
# Copyright (c) 2022 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
| 166
|
Python
|
.py
| 3
| 54
| 92
| 0.759259
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,396
|
statichost.py
|
ansible_ansible/test/integration/targets/ansible-doc/broken-docs/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
inventory: statichost
broken:
short_description: Add a single host
description: Add a single host
extends_documentation_fragment:
- inventory_cache
options:
plugin:
description: plugin name (must be statichost)
required: true
hostname:
description: Toggle display of stderr even when script was successful
required: True
"""
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
class InventoryModule(BaseInventoryPlugin, Cacheable):
NAME = 'testns.content_adj.statichost'
def verify_file(self, path):
pass
def parse(self, inventory, loader, path, cache=None):
pass
| 881
|
Python
|
.py
| 25
| 29.64
| 92
| 0.716647
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,397
|
noop_vars_plugin.py
|
ansible_ansible/test/integration/targets/ansible-doc/broken-docs/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py
|
from __future__ import annotations
DOCUMENTATION = """
vars: noop_vars_plugin
broken:
short_description: Do NOT load host and group vars
description: don't test loading host and group vars from a collection
options:
stage:
default: all
choices: ['all', 'inventory', 'task']
type: str
ini:
- key: stage
section: testns.testcol.noop_vars_plugin
env:
- name: ANSIBLE_VARS_PLUGIN_STAGE
extends_documentation_fragment:
- testns.testcol2.deprecation
"""
from ansible.plugins.vars import BaseVarsPlugin
class VarsModule(BaseVarsPlugin):
def get_vars(self, loader, path, entities, cache=True):
super(VarsModule, self).get_vars(loader, path, entities)
return {'collection': 'yes', 'notreal': 'value'}
| 831
|
Python
|
.py
| 24
| 27.833333
| 73
| 0.658354
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,398
|
notrealmodule.py
|
ansible_ansible/test/integration/targets/ansible-doc/broken-docs/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py
|
#!/usr/bin/python
from __future__ import annotations
import json
def main():
print(json.dumps(dict(changed=False, source='testns.testcol.notrealmodule')))
if __name__ == '__main__':
main()
| 202
|
Python
|
.py
| 7
| 26
| 81
| 0.694737
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
13,399
|
randommodule.py
|
ansible_ansible/test/integration/targets/ansible-doc/broken-docs/collections/ansible_collections/testns/testcol/plugins/modules/randommodule.py
|
#!/usr/bin/python
from __future__ import annotations
DOCUMENTATION = """
---
module: randommodule
short_description: A random module
description:
- A random module.
author:
- Ansible Core Team
version_added: 1.0.0
deprecated:
alternative: Use some other module
why: Test deprecation
removed_in: '3.0.0'
options:
test:
description: Some text.
type: str
version_added: 1.2.0
sub:
description: Suboptions.
type: dict
suboptions:
subtest:
description: A suboption.
type: int
version_added: 1.1.0
# The following is the wrong syntax, and should not get processed
# by add_collection_to_versions_and_dates()
options:
subtest2:
description: Another suboption.
type: float
version_added: 1.1.0
# The following is not supported in modules, and should not get processed
# by add_collection_to_versions_and_dates()
env:
- name: TEST_ENV
version_added: 1.0.0
deprecated:
alternative: none
why: Test deprecation
removed_in: '2.0.0'
version: '2.0.0'
extends_documentation_fragment:
- testns.testcol2.module
"""
EXAMPLES = """
"""
RETURN = """
z_last:
description: A last result.
broken:
type: str
returned: success
version_added: 1.3.0
m_middle:
description:
- This should be in the middle.
- Has some more data
type: dict
returned: success and 1st of month
contains:
suboption:
description: A suboption.
type: str
choices: [ARF, BARN, c_without_capital_first_letter]
version_added: 1.4.0
a_first:
description: A first result.
type: str
returned: success
"""
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
)
module.exit_json()
if __name__ == '__main__':
main()
| 2,124
|
Python
|
.py
| 82
| 18.646341
| 81
| 0.591424
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|