diff --git a/README.md b/README.md index bc62e308d..00e420853 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ This Nautobot application framework includes the following integrations: - Device42 - Infoblox - IPFabric +- Itential - ServiceNow Read more about integrations [here](https://docs.nautobot.com/projects/ssot/en/latest/user/integrations). To enable and configure integrations follow the instructions from [the install guide](https://docs.nautobot.com/projects/ssot/en/latest/admin/install/#integrations-configuration). @@ -79,6 +80,7 @@ The SSoT framework includes a number of integrations with external Systems of Re * Arista CloudVision * Device42 * Infoblox +* Itential * ServiceNow > Note that the Arista CloudVision integration is currently incompatible with the [Arista Labs](https://labs.arista.com/) environment due to a TLS issue. It has been confirmed to work in on-prem environments previously. @@ -114,6 +116,9 @@ This project includes code originally written in separate Nautobot apps, which h [@nniehoff](https://github.com/nniehoff), [@qduk](https://github.com/qduk), [@ubajze](https://github.com/ubajze) +- [nautobot-plugin-ssot-device42](https://github.com/nautobot/nautobot-plugin-ssot-device42): + Thanks + [@jdrew82](https://github.com/jdrew82) - [nautobot-plugin-ssot-infoblox](https://github.com/nautobot/nautobot-plugin-ssot-infoblox): Thanks [@FragmentedPacket](https://github.com/FragmentedPacket), diff --git a/changes/484.housekeeping b/changes/484.housekeeping new file mode 100644 index 000000000..881c35ec0 --- /dev/null +++ b/changes/484.housekeeping @@ -0,0 +1 @@ +Releasing 2.7.0 \ No newline at end of file diff --git a/development/creds.example.env b/development/creds.example.env index 780d04b29..5feb6c71c 100644 --- a/development/creds.example.env +++ b/development/creds.example.env @@ -29,6 +29,8 @@ MYSQL_PASSWORD=${NAUTOBOT_DB_PASSWORD} NAUTOBOT_ARISTACV_CVP_PASSWORD="changeme" NAUTOBOT_ARISTACV_CVP_TOKEN="changeme" 
+NAUTOBOT_SSOT_DEVICE42_PASSWORD="changeme" + NAUTOBOT_SSOT_INFOBLOX_PASSWORD="changeme" # ACI Credentials. Append friendly name to the end to identify each APIC. diff --git a/development/development.env b/development/development.env index 3eb37964b..930f62db5 100644 --- a/development/development.env +++ b/development/development.env @@ -45,7 +45,7 @@ NAUTOBOT_CELERY_TASK_TIME_LIMIT=7200 NAUTOBOT_SSOT_HIDE_EXAMPLE_JOBS="False" NAUTOBOT_SSOT_ALLOW_CONFLICTING_APPS="False" -NAUTOBOT_SSOT_ENABLE_ACI="True" +NAUTOBOT_SSOT_ENABLE_ACI="False" NAUTOBOT_SSOT_ACI_TAG="ACI" NAUTOBOT_SSOT_ACI_TAG_COLOR="0047AB" NAUTOBOT_SSOT_ACI_TAG_UP="UP" @@ -57,7 +57,7 @@ NAUTOBOT_SSOT_ACI_IGNORE_TENANTS="[mgmt,infra]" NAUTOBOT_SSOT_ACI_COMMENTS="Created by ACI SSoT Integration" NAUTOBOT_SSOT_ACI_SITE="Data Center" -NAUTOBOT_SSOT_ENABLE_ARISTACV="True" +NAUTOBOT_SSOT_ENABLE_ARISTACV="False" NAUTOBOT_ARISTACV_CONTROLLER_SITE="" NAUTOBOT_ARISTACV_CREATE_CONTROLLER="True" NAUTOBOT_ARISTACV_CVAAS_URL="www.arista.io:443" @@ -69,12 +69,12 @@ NAUTOBOT_ARISTACV_IMPORT_ACTIVE="False" NAUTOBOT_ARISTACV_IMPORT_TAG="False" NAUTOBOT_ARISTACV_VERIFY=True -NAUTOBOT_SSOT_ENABLE_DEVICE42="True" +NAUTOBOT_SSOT_ENABLE_DEVICE42="False" NAUTOBOT_SSOT_DEVICE42_HOST="" NAUTOBOT_SSOT_DEVICE42_USERNAME="" NAUTOBOT_SSOT_DEVICE42_PASSWORD="" -NAUTOBOT_SSOT_ENABLE_INFOBLOX="True" +NAUTOBOT_SSOT_ENABLE_INFOBLOX="False" NAUTOBOT_SSOT_INFOBLOX_DEFAULT_STATUS="Active" NAUTOBOT_SSOT_INFOBLOX_ENABLE_SYNC_TO_INFOBLOX="True" NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_IP_ADDRESSES="True" @@ -88,11 +88,13 @@ NAUTOBOT_SSOT_INFOBLOX_USERNAME="changeme" NAUTOBOT_SSOT_INFOBLOX_VERIFY_SSL="True" # NAUTOBOT_SSOT_INFOBLOX_WAPI_VERSION="" -NAUTOBOT_SSOT_ENABLE_SERVICENOW="True" +NAUTOBOT_SSOT_ENABLE_SERVICENOW="False" SERVICENOW_INSTANCE="" SERVICENOW_USERNAME="" -NAUTOBOT_SSOT_ENABLE_IPFABRIC="True" +NAUTOBOT_SSOT_ENABLE_IPFABRIC="False" IPFABRIC_HOST="https://ipfabric.example.com" IPFABRIC_SSL_VERIFY="True" IPFABRIC_TIMEOUT=15 + 
+NAUTOBOT_SSOT_ENABLE_ITENTIAL="True" diff --git a/development/docker-compose.mysql.yml b/development/docker-compose.mysql.yml index 062ada948..2f1103da4 100644 --- a/development/docker-compose.mysql.yml +++ b/development/docker-compose.mysql.yml @@ -19,7 +19,6 @@ services: db: image: "mysql:8" command: - - "--default-authentication-plugin=mysql_native_password" - "--max_connections=1000" env_file: - "development.env" diff --git a/development/nautobot_config.py b/development/nautobot_config.py index 12c20c603..ff9e7973e 100644 --- a/development/nautobot_config.py +++ b/development/nautobot_config.py @@ -196,6 +196,7 @@ "enable_device42": is_truthy(os.getenv("NAUTOBOT_SSOT_ENABLE_DEVICE42")), "enable_infoblox": is_truthy(os.getenv("NAUTOBOT_SSOT_ENABLE_INFOBLOX")), "enable_ipfabric": is_truthy(os.getenv("NAUTOBOT_SSOT_ENABLE_IPFABRIC")), + "enable_itential": is_truthy(os.getenv("NAUTOBOT_SSOT_ENABLE_ITENTIAL")), "enable_servicenow": is_truthy(os.getenv("NAUTOBOT_SSOT_ENABLE_SERVICENOW")), "hide_example_jobs": is_truthy(os.getenv("NAUTOBOT_SSOT_HIDE_EXAMPLE_JOBS")), "device42_host": os.getenv("NAUTOBOT_SSOT_DEVICE42_HOST", ""), diff --git a/docs/admin/integrations/index.md b/docs/admin/integrations/index.md index 292f11a17..1f28b95e8 100644 --- a/docs/admin/integrations/index.md +++ b/docs/admin/integrations/index.md @@ -7,4 +7,5 @@ This Nautobot app supports the following integrations: - [Device42](./device42_setup.md) - [Infoblox](./infoblox_setup.md) - [IPFabric](./ipfabric_setup.md) +- [Itential](./itential_setup.md) - [ServiceNow](./servicenow_setup.md) diff --git a/docs/admin/integrations/infoblox_setup.md b/docs/admin/integrations/infoblox_setup.md index 082019e8f..20088123b 100644 --- a/docs/admin/integrations/infoblox_setup.md +++ b/docs/admin/integrations/infoblox_setup.md @@ -4,7 +4,7 @@ This guide will walk you through the steps to set up Infoblox integration with t ## Prerequisites -Before configuring the integration, please ensure, that `nautobot-ssot` 
app was [installed with the Infoblox integration extra dependencies](../install.md#install-guide). +Before configuring the integration, please ensure, that the `nautobot-ssot` app was [installed with the Infoblox integration extra dependencies](../install.md#install-guide). ```shell pip install nautobot-ssot[infoblox] @@ -12,58 +12,171 @@ pip install nautobot-ssot[infoblox] ## Configuration -Integration behavior can be controlled with the following settings: - -| Setting | Default | Description | -| ------------------------------------------ | ------- | ----------------------------------------------------------------------------- | -| infoblox_url | N/A | URL of the Infoblox instance to sync with. | -| infoblox_username | N/A | The username to authenticate against Infoblox with. | -| infoblox_password | N/A | The password to authenticate against Infblox with. | -| infoblox_verify_ssl | True | Toggle SSL verification when syncing data with Infoblox. | -| infoblox_wapi_version | v2.12 | The version of the Infoblox API. | -| infoblox_enable_sync_to_infoblox | False | Add job to sync data from Nautobot into Infoblox. | -| infoblox_enable_rfc1918_network_containers | False | Add job to sync network containers to Nautobot (top level aggregates). | -| infoblox_default_status | active | Default Status to be assigned to imported objects. | -| infoblox_import_objects_ip_addresses | False | Import IP addresses from Infoblox to Nautobot. | -| infoblox_import_objects_subnets | False | Import subnets from Infoblox to Nautobot. | -| infoblox_import_objects_subnets_ipv6 | False | Import IPv6 subnets from Infoblox to Nautobot. | -| infoblox_import_objects_vlan_views | False | Import VLAN views from Infoblox to Nautobot. | -| infoblox_import_objects_vlans | False | Import VLANs from Infoblox to Nautobot. | -| infoblox_import_subnets | N/A | List of Subnets in CIDR string notation to filter import to. 
| -| infoblox_network_view | N/A | Only load IPAddresses from a specific Infoblox Network View. | -| infoblox_request_timeout | 60 | How long HTTP requests to Infoblox should wait for a response before failing. | - -Below is an example snippet from `nautobot_config.py` that demonstrates how to enable and configure Infoblox integration: - -```python -PLUGINS_CONFIG = { - "nautobot_ssot": { - "enable_infoblox": True, - "infoblox_default_status": os.getenv("NAUTOBOT_SSOT_INFOBLOX_DEFAULT_STATUS", "active"), - "infoblox_enable_rfc1918_network_containers": is_truthy( - os.getenv("NAUTOBOT_SSOT_INFOBLOX_ENABLE_RFC1918_NETWORK_CONTAINERS") - ), - "infoblox_enable_sync_to_infoblox": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_ENABLE_SYNC_TO_INFOBLOX")), - "infoblox_import_objects_ip_addresses": is_truthy( - os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_IP_ADDRESSES") - ), - "infoblox_import_objects_subnets": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_SUBNETS")), - "infoblox_import_objects_subnets_ipv6": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_SUBNETS_IPV6")), - "infoblox_import_objects_vlan_views": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_VLAN_VIEWS")), - "infoblox_import_objects_vlans": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_VLANS")), - "infoblox_import_subnets": [x for x in os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_SUBNETS", "").split(",") if x], - "infoblox_password": os.getenv("NAUTOBOT_SSOT_INFOBLOX_PASSWORD"), - "infoblox_url": os.getenv("NAUTOBOT_SSOT_INFOBLOX_URL"), - "infoblox_username": os.getenv("NAUTOBOT_SSOT_INFOBLOX_USERNAME"), - "infoblox_verify_ssl": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_VERIFY_SSL", True)), - "infoblox_wapi_version": os.getenv("NAUTOBOT_SSOT_INFOBLOX_WAPI_VERSION", "v2.12"), - "infoblox_request_timeout": 120, +!!! note + Legacy configuration settings defined in the `nautobot_config.py` and environmental variables are deprecated. 
These settings are migrated on a best-effort basis on the first startup following migration to the Nautobot SSOT 2.7.0 or higher. + +Integration configuration is defined in the instance of the `SSOTInfobloxConfig` model. Multiple configuration instances are supported. Synchronization jobs take the `Config` parameter which specifies the configuration instance to use. + +To access integration configuration navigate to `Apps -> Installed Apps` and click on the cog icon in the `Single Source of Truth` entry. Then in the table `SSOT Integration Configs` click on the `Infoblox Configuration List` link. This will take you to the view where you can view/modify existing config instances or create new ones. + +Configuration instance contains the below settings: + +| Setting | Default | Description | +| Name | N/A | Unique name of the configuration instance. | +| Description | N/A | Description of the configuration instance. | +| Infoblox Instance Config | N/A | External Integration object describing remote Infoblox instance. | +| Infoblox WAPI Version | v2.12 | The version of the Infoblox API. | +| Enabled for Sync Job | False | Allows this config to be used in the sync jobs. | +| Sync to Infoblox | False | Allows this config to be used in the job syncing from Nautobot to Infoblox. | +| Sync to Nautobot | True | Allows this config to be used in the job syncing from Infoblox to Nautobot. | +| Import IP Addresses | False | Import IP addresses from the source to the target system. | +| Import Networks | False | Import IP networks from the source to the target system. | +| Import VLAN Views | False | Import VLAN Views from the source to the target system. | +| Import VLANs | False | Import VLANs from the source to the target system. | +| Import IPv4 | True | Import IPv4 objects from the source to the target system. | +| Import IPv6 | False | Import IPv6 objects from the source to the target system. 
| +| Fixed address type | Do not create record | Selects type of Fixed Address to create in Infoblox for imported IP Addresses. | +| DNS record type | Do not create record | Selects the type of DNS record to create in Infoblox for imported IP Addresses. | +| Default object status | Active | Default Status to be assigned to imported objects. | +| Infoblox - deletable models | [] | Infoblox model types whose instances are allowed to be deleted during sync. | +| Nautobot - deletable models | [] | Nautobot model types whose instances are allowed to be deleted during sync. | +| Infoblox Sync Filters | `[{"network_view": "default"}]` | Filters control what data is loaded from the source and target systems and considered for sync. | +| Infoblox Network View to DNS Mapping | `{}`| Map specifying Infoblox DNS View for each Network View where DNS records need to be created. +| Extensible Attributes/Custom Fields to Ignore | `{"custom_fields": [], "extensible_attributes": []}` | Specifies Nautobot custom fields and Infoblox extensible attributes that are excluded from the sync. | + +Each Infoblox configuration must be linked to an External Integration describing the Infoblox instance. The following External Integration fields must be defined for integration to work correctly: + +| Setting | Description | +| Remote URL | URL of the remote Infoblox instance to sync with. | +| Verify SSL | Toggle SSL verification when syncing data with Infoblox. | +| Secrets Group | Secrets Group defining credentials used when connecting to the Infoblox instance. | +| Timeout | How long HTTP requests to Infoblox should wait for a response before failing. 
| + +The Secrets Group linked to the Infoblox External Integration must contain password and username secrets defined as per the below: + +| Access Type | Secret Type | +| REST | Password | +| REST | Username | + + +### Configuring Infoblox Sync Filters + +Infoblox Sync Filters is a mandatory setting used to control the scope of the IP objects that are loaded from Nautobot and Infoblox. Only these objects are in the scope of the synchronization process. The default value of this setting is: + +```json +[ + { + "network_view": "default" + } +] +``` + +This default value specifies that all IPv4 and IPv6 objects located in Infoblox "default" Network View or Nautobot "Global" Namespace, will be loaded for comparison and considered for synchronization. + +Infoblox Sync Filters can contain multiple entries. Each entry is a dictionary with one mandatory key `network_view` and two optional keys `prefixes_ipv4` and `prefixes_ipv6`. + +- `network_view` specifies the name of the Infoblox Network View/Nautobot Namespace from which to load IP objects. There can be only one filter entry per network view name. +- `prefixes_ipv4` (optional) - a list of top-level IPv4 prefixes from which to load IPv4 networks and IP addresses. This applies to both Infoblox and Nautobot. If this key is not defined, all IPv4 addresses within the given namespace are allowed to be loaded. +- `prefixes_ipv6` (optional) - a list of top-level IPv6 prefixes from which to load IPv6 networks and IP addresses. This applies to both Infoblox and Nautobot. If this key is not defined, all IPv6 addresses within the given namespace are allowed to be loaded. 
+ +Below is an example showing three filters used for filtering loaded data: + +```json +[ + { + "network_view": "default" + }, + { + "network_view": "dev", + "prefixes_ipv4": [ + "192.168.0.0/16" + ] + }, + { + "network_view": "test", + "prefixes_ipv4": [ + "10.0.0.0/8" + ], + "prefixes_ipv6": [ + "2001:5b0:4100::/40" + ] } +] +``` + +The above filters will allow the loading of the following data from Infoblox and Nautobot: + +- All IPv4 and IPv6 prefixes and IP addresses in the Infoblox network view "default" and Nautobot namespace "Global". +- Only IPv4 prefixes and IP addresses, contained within the `192.168.0.0/16` container, located in Infoblox network view "dev" and Nautobot namespace "dev". All IPv6 prefixes and IP addresses in the Infoblox network view "dev" and Nautobot namespace "dev". +- Only IPv4 prefixes and IP addresses, contained within the `10.0.0.0/8` container, located in Infoblox network view "test" and Nautobot namespace "test". Only IPv6 prefixes and IP addresses contained within the `2001:5b0:4100::/40` container that are located in the Infoblox network view "test" and Nautobot namespace "test". + + +### Configuring Infoblox DNS View Mapping + +Infoblox DNS View Mapping is an optional setting that tells Infoblox SSOT where to create DNS Host, A, and PTR records. Infoblox allows multiple DNS Views to be defined for one Network View. If no mappings are configured the application will create DNS records in the default DNS View associated with the Network View, usually named `default.{network_view_name}`, where `network_view_name` is the name of the parent Network View. + +To define a mapping, specify the name of the Network View as the key and the name of the DNS View as the value. For example: + + +```json +{ + "dev": "dev view", + "default": "corporate" +} ``` -!!! note - All integration settings are defined in the block above as an example. Only some will be needed as described below. 
+The above configuration will create DNS records linked to Network View "dev" in the "dev view" DNS View and records linked to Network View "default" in the "corporate" DNS View. + +### Configuring Extensible Attributes/Custom Fields to Ignore + +The Extensible Attributes/Custom Fields to Ignore setting allows specifying Infoblox Extensible Attributes and Nautobot Custom Fields that are excluded from the synchronization. This stops unwanted extra data that is used for other purposes from being propagated between the systems. + +The default value of this setting is: + +```json +{ + "extensible_attributes": [], + "custom_fields": [] +} +``` + +That is, by default, all of the extensible attributes and custom fields will be synchronized, except the custom fields used internally by the Infoblox integration. + +To exclude Infoblox extensible attributes from being synchronized to Nautobot, add the attribute names to the `extensible_attributes` list. + +To exclude Nautobot custom fields from being synchronized to Infoblox, add the custom field names to the `custom_fields` list. + +## Custom Fields, Tags, and Relationships Used by the Infoblox Integration + +The Infoblox Integration requires the following Nautobot custom fields, tags, and relationships to function correctly. These are created automatically when Nautobot is started and care should be taken to ensure these are not deleted. + +### Custom Fields + +`dhcp_ranges` - Records DHCP ranges associated with a network. This applies to the following models: `Prefix`. +`ssot_synced_to_infoblox` - Records the date the Nautobot object was last synchronized to Infoblox. This applies to the following models: `IPAddress`, `Prefix`, `VLAN`, and `VLANGroup`. +`mac_address` - Records MAC address associated with an IP Address. This is required when creating an Infoblox Fixed Address of type MAC from Nautobot IP Address objects. This applies to the following model: `IPAddress`. 
+`fixed_address_comment` - Records comment for the corresponding Fixed Address record in Infoblox. This applies to the following model: `IPAddress`. +`dns_a_record_comment_custom_field` - Records comment for the corresponding DNS A record in Infoblox. This applies to the following model: `IPAddress`. +`dns_host_record_comment_custom_field` - Records comment for the corresponding DNS Host record in Infoblox. This applies to the following model: `IPAddress`. +`dns_ptr_record_comment_custom_field` - Records comment for the corresponding DNS PTR record in Infoblox. This applies to the following model: `IPAddress`. + + +### Tags + +`SSoT Synced from Infoblox` - Used to tag Nautobot objects that were synchronized from Infoblox. This applies to the following models: `IPAddress`, `Namespace`, `Prefix`, and `VLAN`. +`SSoT Synced to Infoblox` - Used to tag Nautobot objects that were synchronized to Infoblox. +This applies to the following models: `IPAddress`, `Prefix`, and `VLAN`. + + +### Relationships + +`prefix_to_vlan` - Used to link Nautobot Prefix to a Nautobot VLAN. This corresponds to an Infoblox Network to VLAN relationship. + +### Usage Notes + +- To create an Infoblox Fixed Address record from a Nautobot IP Address object the Nautobot side must have IP Address type set to `DHCP`. +- To create an Infoblox Fixed Address of type MAC the Nautobot IP Address must have a value defined in the `mac_address` custom field. 
+ ## Upgrading from `nautobot-plugin-ssot-infoblox` App @@ -78,57 +191,3 @@ PLUGINS_CONFIG = { ```shell pip install --upgrade nautobot-ssot[infoblox] ``` -- Fix `nautobot_config.py` by removing `nautobot_ssot_infoblox` from `PLUGINS` and merging app configuration into `nautobot_ssot`: - ```python - PLUGINS = [ - "nautobot_ssot", - # "infoblox" # REMOVE THIS LINE - ] - - PLUGINS_CONFIG = { - # "nautobot_ssot_infoblox": { REMOVE THIS APP CONFIGURATION - # MOVE CONFIGURATION TO `nautobot_ssot` SECTION AND UPDATE KEYS - # "NAUTOBOT_INFOBLOX_URL": os.getenv("NAUTOBOT_INFOBLOX_URL", ""), - # "NAUTOBOT_INFOBLOX_USERNAME": os.getenv("NAUTOBOT_INFOBLOX_USERNAME", ""), - # "NAUTOBOT_INFOBLOX_PASSWORD": os.getenv("NAUTOBOT_INFOBLOX_PASSWORD", ""), - # "NAUTOBOT_INFOBLOX_VERIFY_SSL": os.getenv("NAUTOBOT_INFOBLOX_VERIFY_SSL", "true"), - # "NAUTOBOT_INFOBLOX_WAPI_VERSION": os.getenv("NAUTOBOT_INFOBLOX_WAPI_VERSION", "v2.12"), - # "enable_sync_to_infoblox": False, - # "enable_rfc1918_network_containers": False, - # "default_status": "active", - # "infoblox_import_objects": { - # "vlan_views": os.getenv("NAUTOBOT_INFOBLOX_IMPORT_VLAN_VIEWS", True), - # "vlans": os.getenv("NAUTOBOT_INFOBLOX_IMPORT_VLANS", True), - # "subnets": os.getenv("NAUTOBOT_INFOBLOX_INFOBLOX_IMPORT_SUBNETS", True), - # "ip_addresses": os.getenv("NAUTOBOT_INFOBLOX_IMPORT_IP_ADDRESSES", True), - # }, - # "infoblox_import_subnets": ["10.46.128.0/18", "192.168.1.0/24"], - # } - "nautobot_ssot": { - # Enable Infoblox integration - "enable_infoblox": True, - # Following lines are moved from `nautobot_ssot_infoblox` and prefixed with `infoblox_` - "infoblox_default_status": os.getenv("NAUTOBOT_SSOT_INFOBLOX_DEFAULT_STATUS", "active"), - "infoblox_enable_rfc1918_network_containers": is_truthy( - os.getenv("NAUTOBOT_SSOT_INFOBLOX_ENABLE_RFC1918_NETWORK_CONTAINERS") - ), - "infoblox_enable_sync_to_infoblox": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_ENABLE_SYNC_TO_INFOBLOX")), - 
"infoblox_import_objects_ip_addresses": is_truthy( - os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_IP_ADDRESSES") - ), - "infoblox_import_objects_subnets": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_SUBNETS")), - "infoblox_import_objects_subnets_ipv6": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_SUBNETS_IPV6")), - "infoblox_import_objects_vlan_views": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_VLAN_VIEWS")), - "infoblox_import_objects_vlans": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_OBJECTS_VLANS")), - "infoblox_import_subnets": [x for x in os.getenv("NAUTOBOT_SSOT_INFOBLOX_IMPORT_SUBNETS", "").split(",") if x], - "infoblox_password": os.getenv("NAUTOBOT_SSOT_INFOBLOX_PASSWORD"), - "infoblox_url": os.getenv("NAUTOBOT_SSOT_INFOBLOX_URL"), - "infoblox_username": os.getenv("NAUTOBOT_SSOT_INFOBLOX_USERNAME"), - "infoblox_verify_ssl": is_truthy(os.getenv("NAUTOBOT_SSOT_INFOBLOX_VERIFY_SSL", True)), - "infoblox_wapi_version": os.getenv("NAUTOBOT_SSOT_INFOBLOX_WAPI_VERSION", "v2.12"), - } - } - ``` - -!!! note - Configuration keys are prefixed with `infoblox_`. diff --git a/docs/admin/integrations/itential_setup.md b/docs/admin/integrations/itential_setup.md new file mode 100644 index 000000000..ae1479ce7 --- /dev/null +++ b/docs/admin/integrations/itential_setup.md @@ -0,0 +1,63 @@ +# Itential Integration Setup + +This guide will walk you through steps to set up Itential integration with the `nautobot_ssot` app. + +## Prerequisites + +Before configuring the integration, please ensure, that `nautobot-ssot` app was [installed with the Itential integration extra dependencies](../install.md#install-guide). 
+ +```shell +pip install nautobot-ssot[itential] +``` + +## Configuration + +The integration with Itential primarily utilizes the [External Integrations](https://docs.nautobot.com/projects/core/en/stable/user-guide/platform-functionality/externalintegration/?h=external) and [Secrets](https://docs.nautobot.com/projects/core/en/stable/user-guide/platform-functionality/secret/?h=secrets) features within Nautobot to set up the integration. To enable this integration, the only modification needed is to activate it in the nautobot_config.py file. + +Below is an example snippet from `nautobot_config.py` that demonstrates how to enable the Itential integration: + +```python +PLUGINS_CONFIG = { + "nautobot_ssot": { + "enable_itential": True, + } +} +``` + +Remaining configurations are performed in the Nautobot UI or through the Nautobot API. + +### Secrets + +The Itential integration necessitates four secret values: (1) Itential API access username, (2) Itential API access password, (3) network device access username, and (4) network device access password. You can store these secrets using the secrets provider of your choice. + +### Secrets Group + +When assigning secrets to a secrets group, please refer to the table below to correctly assign each secret to its respective access type and secret type. + +| Secret Description | Access Type | Secret Type | +|-----------------------|-------------|-------------| +| Itential API username | REST | Username | +| Itential API password | REST | Password | +| Device username | GENERIC | Username | +| Device password | GENERIC | Password | + +### External Integration + +When setting up an external integration, you must provide the following required fields: + +1. **Name**: The unique identifier for the integration. +2. **Remote URL**: The endpoint URL, including the protocol and port, if applicable. +3. **Verify SSL**: A boolean value indicating whether SSL certificates should be verified. +4. 
**Secrets Group**: The group of secrets associated with the integration, containing necessary authentication details. + +The remote URL must include both the protocol (either http or https) and the TCP port used by the automation gateway. For example, to access the automation gateway, you would enter a URL like: https://iag.example.com:8443. + +### Automation Gateway Management + +To manage the Automation Gateway, navigate to Plugins -> Single Source of Truth -> Itential Automation Gateway in your application. From this interface, you can input details about the automation gateway, which include: + +1. **Name**: Specify the name of the automation gateway. +2. **Description**: Provide a brief description of what the automation gateway is used for. +3. **Location**: Indicate the primary location of the devices managed by the automation gateway. +4. **Location Descendants**: This boolean value determines whether the automation gateway should also manage devices in child locations of the specified primary location. +5. **Enabled**: This boolean setting allows you to enable or disable inventory synchronization with the automation gateway. diff --git a/docs/admin/integrations/servicenow_setup.md b/docs/admin/integrations/servicenow_setup.md index 6c1bbc891..11de64353 100644 --- a/docs/admin/integrations/servicenow_setup.md +++ b/docs/admin/integrations/servicenow_setup.md @@ -80,3 +80,4 @@ PLUGINS_CONFIG = { !!! note Configuration keys are prefixed with `servicenow_`. + diff --git a/docs/admin/release_notes/version_2.7.md b/docs/admin/release_notes/version_2.7.md new file mode 100644 index 000000000..aff9e964a --- /dev/null +++ b/docs/admin/release_notes/version_2.7.md @@ -0,0 +1,49 @@ + +## [v2.7.0 (2024-07-16)](https://github.com/nautobot/nautobot-app-ssot/releases/tag/v2.7.0) + +### Added + +- [#432](https://github.com/nautobot/nautobot-app-ssot/issues/432) - Added an SSoT to sync Nautobot ==> Itential Automation Gateway. 
+- [#432](https://github.com/nautobot/nautobot-app-ssot/issues/432) - This integration allows users to sync Nautobot device inventory to Itential Automation Gateway(s) (IAG). +- [#432](https://github.com/nautobot/nautobot-app-ssot/issues/432) - The current IAG inventory that is supported is its default Ansible inventory. +- [#432](https://github.com/nautobot/nautobot-app-ssot/issues/432) - Netmiko, Nornir, HTTP requests inventories will be added at a later date. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Added plugin configuration page collecting configurations for integrations. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added SSOTInfobloxConfig model used for providing Infoblox integration configuration. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for multiple configuration instances. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for Infoblox Network Views and Nautobot Namespaces. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for selecting a subset of Network and IP address objects loaded for synchronization. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for creating Infoblox IP Addresses as A and PTR records. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for creating Infoblox IP Addresses as Fixed Address records of type RESERVED and MAC_ADDRESS. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for excluding extensive attributes and custom fields when synchronizing objects. 
+- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for selectively enabling synchronization of IPv4 and IPv6 objects. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for specifying Infoblox DNS View where DNS records are created. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added support for specifying record types subject to deletion in Infoblox and Nautobot. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - added methods to Infoblox handling fixed addresses, DNS A, Host and PTR records, network views, DNS views, and authoritative zones. +- [#469](https://github.com/nautobot/nautobot-app-ssot/issues/469) - Added more models for import in Example Jobs. + +### Changed + +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - configuration settings are now defined in the instances of the SSOTInfobloxConfig model. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - functionality provided by the `infoblox_import_subnets` settings has been replaced with the `infoblox_sync_filters` field in the SSOTInfobloxConfig instance. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - updated Infoblox client methods to support Network View. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - standardized `JSONDecoderError` handling in the Infoblox client. + +### Removed + +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - configuration settings defined in `nautobot_config.py` have been removed. +- [#442](https://github.com/nautobot/nautobot-app-ssot/issues/442) - Infoblox integration - configuration settings defined in environmental variables have been removed. 
+ +### Fixed + +- [#234](https://github.com/nautobot/nautobot-app-ssot/issues/234) - Fixed integration tests so they're no longer dependent upon being enabled in dev environment. +- [#437](https://github.com/nautobot/nautobot-app-ssot/issues/437) - Fixed link from list view to filtered sync log view by changing filter query to `sync` from overview. +- [#443](https://github.com/nautobot/nautobot-app-ssot/issues/443) - Fixed issue with loading duplicate IPAddresses from Infoblox. +- [#456](https://github.com/nautobot/nautobot-app-ssot/issues/456) - Fix Device42 integration unit test that was expecting wrong BIG-IP netmiko platform name. +- [#463](https://github.com/nautobot/nautobot-app-ssot/issues/463) - Fixed call in CVP integration to pass `import_active` config setting to get_devices() function call. +- [#479](https://github.com/nautobot/nautobot-app-ssot/issues/479) - Correct get_or_instantiate() to use self.device_type instead of "device_type" in ACI adapter. +- [#479](https://github.com/nautobot/nautobot-app-ssot/issues/479) - Refactor load_interfaces() to have check for device_specs var being defined in case file isn't loaded. + +### Documentation + +- [#450](https://github.com/nautobot/nautobot-app-ssot/issues/450) - Add missing attribution for Device42 integration to README. +- [#472](https://github.com/nautobot/nautobot-app-ssot/issues/472) - Update ServiceNow documentation for Locations and FAQ error. diff --git a/docs/user/faq.md b/docs/user/faq.md index e4955af91..ce6588a28 100644 --- a/docs/user/faq.md +++ b/docs/user/faq.md @@ -3,3 +3,11 @@ ## _Is the application actually a Single Source of Truth?_ In reality the application intends to have behaviors as if it was a SSoT. The difference being, the application intends to aggregate data in the real world where it is not feasible to have the System of Record be in a single system. + +## Why did my ServiceNow job fail with an `IncompleteJSONError`? 
+ +``` +An exception occurred: `IncompleteJSONError: lexical error: invalid char in json text. packaging.version.parse(nautobot_version): + incompatible_apps_msg.append(f"The `{app}` requires Nautobot version {nb_ver} or higher.\n") + + if incompatible_apps_msg: + raise RuntimeError( + f"This version of Nautobot ({nautobot_version}) does not meet minimum requirements for the following apps:\n {''.join(incompatible_apps_msg)}." + "See: https://docs.nautobot.com/projects/ssot/en/latest/admin/upgrade/#potential-apps-conflicts" + ) + def _check_for_conflicting_apps(): intersection = set(_CONFLICTING_APP_NAMES).intersection(set(settings.PLUGINS)) @@ -35,6 +56,8 @@ def _check_for_conflicting_apps(): if not is_truthy(os.getenv("NAUTOBOT_SSOT_ALLOW_CONFLICTING_APPS", "False")): _check_for_conflicting_apps() +_check_min_nautobot_version_met() + class NautobotSSOTAppConfig(NautobotAppConfig): """App configuration for the nautobot_ssot app.""" @@ -96,20 +119,8 @@ class NautobotSSOTAppConfig(NautobotAppConfig): "enable_infoblox": False, "enable_ipfabric": False, "enable_servicenow": False, + "enable_itential": False, "hide_example_jobs": True, - "infoblox_default_status": "", - "infoblox_enable_rfc1918_network_containers": False, - "infoblox_enable_sync_to_infoblox": False, - "infoblox_import_objects_ip_addresses": False, - "infoblox_import_objects_subnets": False, - "infoblox_import_objects_vlan_views": False, - "infoblox_import_objects_vlans": False, - "infoblox_import_subnets": [], - "infoblox_password": "", - "infoblox_url": "", - "infoblox_username": "", - "infoblox_verify_ssl": True, - "infoblox_wapi_version": "", "ipfabric_api_token": "", "ipfabric_host": "", "ipfabric_ssl_verify": True, @@ -120,6 +131,7 @@ class NautobotSSOTAppConfig(NautobotAppConfig): "servicenow_username": "", } caching_config = {} + config_view_name = "plugins:nautobot_ssot:config" def ready(self): """Trigger callback when database is ready.""" diff --git a/nautobot_ssot/api/urls.py 
b/nautobot_ssot/api/urls.py new file mode 100644 index 000000000..a45c5cad9 --- /dev/null +++ b/nautobot_ssot/api/urls.py @@ -0,0 +1,14 @@ +"""Django urlpatterns declaration for nautobot_ssot API.""" + +from nautobot_ssot.integrations.utils import each_enabled_integration_module + +app_name = "ssot" # pylint: disable=invalid-name +urlpatterns = [] + + +def _add_integrations(): + for module in each_enabled_integration_module("api.urls"): + urlpatterns.extend(module.urlpatterns) + + +_add_integrations() diff --git a/nautobot_ssot/integrations/aci/diffsync/adapters/aci.py b/nautobot_ssot/integrations/aci/diffsync/adapters/aci.py index 9ea0ae0ad..cc25aa1b1 100644 --- a/nautobot_ssot/integrations/aci/diffsync/adapters/aci.py +++ b/nautobot_ssot/integrations/aci/diffsync/adapters/aci.py @@ -335,58 +335,58 @@ def load_interfaces(self): fn = os.path.join(devicetype_file_path, f"{device['model']}.yaml") if os.path.exists(fn): device_specs = load_yamlfile(fn) - for interface_name, interface in interfaces[device_name].items(): - if_list = [ - intf - for intf in device_specs["interfaces"] - if intf["name"] == interface_name.replace("eth", "Ethernet") - ] - if if_list: - intf_type = if_list[0]["type"] - else: - intf_type = "other" - new_interface = self.interface( - name=interface_name.replace("eth", "Ethernet"), - device=device["name"], - site=self.site, - description=interface["descr"], - gbic_vendor=interface["gbic_vendor"], - gbic_type=interface["gbic_type"], - gbic_sn=interface["gbic_sn"], - gbic_model=interface["gbic_model"], - state=interface["state"], - type=intf_type, - site_tag=self.site, - ) - self.add(new_interface) + if device_specs and device_specs.get("interfaces"): + for interface_name, interface in interfaces[device_name].items(): + if_list = [ + intf + for intf in device_specs["interfaces"] + if intf["name"] == interface_name.replace("eth", "Ethernet") + ] + if if_list: + intf_type = if_list[0]["type"] + else: + intf_type = "other" + new_interface = 
self.interface( + name=interface_name.replace("eth", "Ethernet"), + device=device["name"], + site=self.site, + description=interface["descr"], + gbic_vendor=interface["gbic_vendor"], + gbic_type=interface["gbic_type"], + gbic_sn=interface["gbic_sn"], + gbic_model=interface["gbic_model"], + state=interface["state"], + type=intf_type, + site_tag=self.site, + ) + self.add(new_interface) + for _interface in device_specs["interfaces"]: + if_list = [intf for intf in device_specs["interfaces"] if intf["name"] == _interface] + if if_list: + intf_type = if_list[0]["type"] + else: + intf_type = "other" + if re.match("^Eth[0-9]|^mgmt[0-9]", _interface["name"]): + new_interface = self.interface( + name=_interface["name"], + device=device["name"], + site=self.site, + description="", + gbic_vendor="", + gbic_type="", + gbic_sn="", + gbic_model="", + state="up", + type=intf_type, + site_tag=self.site, + ) + self.add(new_interface) else: logger.warning( "No YAML file exists in device-types for model %s, skipping interface creation", device["model"], ) - for _interface in device_specs["interfaces"]: - if_list = [intf for intf in device_specs["interfaces"] if intf["name"] == _interface] - if if_list: - intf_type = if_list[0]["type"] - else: - intf_type = "other" - if re.match("^Eth[0-9]|^mgmt[0-9]", _interface["name"]): - new_interface = self.interface( - name=_interface["name"], - device=device["name"], - site=self.site, - description="", - gbic_vendor="", - gbic_type="", - gbic_sn="", - gbic_model="", - state="up", - type=intf_type, - site_tag=self.site, - ) - self.add(new_interface) - def load_deviceroles(self): """Load device roles from ACI device data.""" device_roles = {value["role"] for value in self.devices.values()} @@ -410,7 +410,7 @@ def load_devices(self): if not model: self.get_or_instantiate( - "device_type", + self.device_type, ids={"model": value["model"], "part_nbr": ""}, attrs={"manufacturer": "Cisco", "u_height": 1, "comments": ""}, ) diff --git 
a/nautobot_ssot/integrations/aristacv/diffsync/adapters/cloudvision.py b/nautobot_ssot/integrations/aristacv/diffsync/adapters/cloudvision.py index a79aea7ab..a9c1f447e 100644 --- a/nautobot_ssot/integrations/aristacv/diffsync/adapters/cloudvision.py +++ b/nautobot_ssot/integrations/aristacv/diffsync/adapters/cloudvision.py @@ -69,7 +69,9 @@ def load_devices(self): except ObjectAlreadyExists as err: self.job.logger.warning(f"Error attempting to add CloudVision device. {err}") - for index, dev in enumerate(cloudvision.get_devices(client=self.conn.comm_channel), start=1): + for index, dev in enumerate( + cloudvision.get_devices(client=self.conn.comm_channel, import_active=config.import_active), start=1 + ): if self.job.debug: self.job.logger.info(f"Loading {index}° device") if dev["hostname"] != "": diff --git a/nautobot_ssot/integrations/infoblox/api/__init__.py b/nautobot_ssot/integrations/infoblox/api/__init__.py new file mode 100644 index 000000000..777ad3b40 --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/api/__init__.py @@ -0,0 +1 @@ +"""REST API module for nautobot_ssot infoblox integration.""" diff --git a/nautobot_ssot/integrations/infoblox/api/serializers.py b/nautobot_ssot/integrations/infoblox/api/serializers.py new file mode 100644 index 000000000..c1109dafb --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/api/serializers.py @@ -0,0 +1,15 @@ +"""API serializers for nautobot_ssot infoblox.""" + +from nautobot.apps.api import NautobotModelSerializer + +from nautobot_ssot.integrations.infoblox.models import SSOTInfobloxConfig + + +class SSOTInfobloxConfigSerializer(NautobotModelSerializer): # pylint: disable=too-many-ancestors + """REST API serializer for SSOTInfobloxConfig records.""" + + class Meta: + """Meta attributes.""" + + model = SSOTInfobloxConfig + fields = "__all__" diff --git a/nautobot_ssot/integrations/infoblox/api/urls.py b/nautobot_ssot/integrations/infoblox/api/urls.py new file mode 100644 index 000000000..625540967 --- 
/dev/null +++ b/nautobot_ssot/integrations/infoblox/api/urls.py @@ -0,0 +1,12 @@ +"""Django urlpatterns declaration for nautobot_ssot infoblox API.""" + +from rest_framework import routers + +from nautobot_ssot.integrations.infoblox.api.views import SSOTInfobloxConfigView + +router = routers.DefaultRouter() + +router.register("config/infoblox", SSOTInfobloxConfigView) +app_name = "ssot" # pylint: disable=invalid-name + +urlpatterns = router.urls diff --git a/nautobot_ssot/integrations/infoblox/api/views.py b/nautobot_ssot/integrations/infoblox/api/views.py new file mode 100644 index 000000000..65408758b --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/api/views.py @@ -0,0 +1,15 @@ +"""API views for nautobot_ssot infoblox.""" + +from nautobot.apps.api import NautobotModelViewSet + +from nautobot_ssot.integrations.infoblox.filters import SSOTInfobloxConfigFilterSet +from nautobot_ssot.integrations.infoblox.models import SSOTInfobloxConfig +from .serializers import SSOTInfobloxConfigSerializer + + +class SSOTInfobloxConfigView(NautobotModelViewSet): # pylint: disable=too-many-ancestors + """API CRUD operations set for the SSOTInfobloxConfig view.""" + + queryset = SSOTInfobloxConfig.objects.all() + filterset_class = SSOTInfobloxConfigFilterSet + serializer_class = SSOTInfobloxConfigSerializer diff --git a/nautobot_ssot/integrations/infoblox/choices.py b/nautobot_ssot/integrations/infoblox/choices.py new file mode 100644 index 000000000..35be04cb6 --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/choices.py @@ -0,0 +1,86 @@ +"""Choicesets for Infoblox integration.""" + +from nautobot.apps.choices import ChoiceSet + + +class FixedAddressTypeChoices(ChoiceSet): + """Choiceset used by SSOTInfobloxConfig. + + Infoblox supports the below values for `match_client` field in the `fixed_address` object: + + CIRCUIT_ID + CLIENT_ID + MAC_ADDRESS + REMOTE_ID + RESERVED + + We currently support creation of MAC_ADDRESS and RESERVED types only. 
+ """ + + DONT_CREATE_RECORD = "do-not-create-record" + MAC_ADDRESS = "create-fixed-with-mac-address" + RESERVED = "create-reservation-no-mac-address" + + CHOICES = ( + (DONT_CREATE_RECORD, "Do not create fixed address"), + (MAC_ADDRESS, "Create record with MAC adddres"), + (RESERVED, "Create reservation with no MAC address"), + ) + + +class DNSRecordTypeChoices(ChoiceSet): + """Choiceset used by SSOTInfobloxConfig.""" + + DONT_CREATE_RECORD = "do-not-create-dns-record" + HOST_RECORD = "create-host-record" + A_RECORD = "create-a-record" + A_AND_PTR_RECORD = "create-a-and-ptr-records" + + CHOICES = ( + (DONT_CREATE_RECORD, "Do not create DNS record"), + (HOST_RECORD, "Create Host record"), + (A_RECORD, "Create A record"), + (A_AND_PTR_RECORD, "Create A and PTR records"), + ) + + +class InfobloxDeletableModelChoices(ChoiceSet): + """Choiceset used by SSOTInfobloxConfig. + + These choices specify types of records that can be allowed to be deleted in Infoblox. + """ + + DNS_A_RECORD = "dns-a-record" + DNS_HOST_RECORD = "dns-host-record" + DNS_PTR_RECORD = "dns-ptr-record" + FIXED_ADDRESS = "fixed-address" + + CHOICES = ( + (DNS_A_RECORD, "DNS A Record"), + (DNS_HOST_RECORD, "DNS Host Record"), + (DNS_PTR_RECORD, "DNS PTR Record"), + (FIXED_ADDRESS, "Fixed Address"), + ) + + +class NautobotDeletableModelChoices(ChoiceSet): + """Choiceset used by SSOTInfobloxConfig. + + These choices specify types of records that can be allowed to be deleted in Nautobot. 
+ """ + + DNS_A_RECORD = "dns-a-record" + DNS_HOST_RECORD = "dns-host-record" + DNS_PTR_RECORD = "dns-ptr-record" + IP_ADDRESS = "ip-address" + VLAN = "vlan" + VLAN_GROUP = "vlan-group" + + CHOICES = ( + (DNS_A_RECORD, "DNS A Record"), + (DNS_HOST_RECORD, "DNS Host Record"), + (DNS_PTR_RECORD, "DNS PTR Record"), + (IP_ADDRESS, "IP Address"), + (VLAN, "VLAN"), + (VLAN_GROUP, "VLAN Group"), + ) diff --git a/nautobot_ssot/integrations/infoblox/constant.py b/nautobot_ssot/integrations/infoblox/constant.py index ba3f0950e..2bcb8b77c 100644 --- a/nautobot_ssot/integrations/infoblox/constant.py +++ b/nautobot_ssot/integrations/infoblox/constant.py @@ -1,34 +1,3 @@ """Constants for use with the Infoblox SSoT app.""" -from django.conf import settings - - -def _read_app_config(): - """Provides backward compatible object after integrating into `nautobot_ssot` App.""" - config = settings.PLUGINS_CONFIG["nautobot_ssot"] - - return { - "NAUTOBOT_INFOBLOX_URL": config.get("infoblox_url"), - "NAUTOBOT_INFOBLOX_USERNAME": config.get("infoblox_username"), - "NAUTOBOT_INFOBLOX_PASSWORD": config.get("infoblox_password"), - "NAUTOBOT_INFOBLOX_VERIFY_SSL": config.get("infoblox_verify_ssl"), - "NAUTOBOT_INFOBLOX_WAPI_VERSION": config.get("infoblox_wapi_version"), - "NAUTOBOT_INFOBLOX_NETWORK_VIEW": config.get("infoblox_network_view"), - "enable_sync_to_infoblox": config.get("infoblox_enable_sync_to_infoblox"), - "enable_rfc1918_network_containers": config.get("infoblox_enable_rfc1918_network_containers"), - "default_status": config.get("infoblox_default_status"), - "infoblox_import_objects": { - "vlan_views": config.get("infoblox_import_objects_vlan_views"), - "vlans": config.get("infoblox_import_objects_vlans"), - "subnets": config.get("infoblox_import_objects_subnets"), - "subnets_ipv6": config.get("infoblox_import_objects_subnets_ipv6"), - "ip_addresses": config.get("infoblox_import_objects_ip_addresses"), - }, - "infoblox_import_subnets": config.get("infoblox_import_subnets"), - 
"infoblox_request_timeout": int(config.get("infoblox_request_timeout", 60)), - } - - -# Import config vars from nautobot_config.py -PLUGIN_CFG = _read_app_config() TAG_COLOR = "40bfae" diff --git a/nautobot_ssot/integrations/infoblox/diffsync/adapters/infoblox.py b/nautobot_ssot/integrations/infoblox/diffsync/adapters/infoblox.py index 53d02d31a..36f1efca1 100644 --- a/nautobot_ssot/integrations/infoblox/diffsync/adapters/infoblox.py +++ b/nautobot_ssot/integrations/infoblox/diffsync/adapters/infoblox.py @@ -2,43 +2,73 @@ import re +import requests from diffsync import DiffSync from diffsync.enum import DiffSyncFlags from diffsync.exceptions import ObjectAlreadyExists from nautobot.extras.plugins.exceptions import PluginImproperlyConfigured -from nautobot_ssot.integrations.infoblox.constant import PLUGIN_CFG -from nautobot_ssot.integrations.infoblox.utils.client import get_default_ext_attrs, get_dns_name -from nautobot_ssot.integrations.infoblox.utils.diffsync import get_ext_attr_dict, build_vlan_map + +from nautobot_ssot.integrations.infoblox.choices import FixedAddressTypeChoices from nautobot_ssot.integrations.infoblox.diffsync.models.infoblox import ( + InfobloxDnsARecord, + InfobloxDnsHostRecord, + InfobloxDnsPTRRecord, InfobloxIPAddress, + InfobloxNamespace, InfobloxNetwork, - InfobloxVLANView, InfobloxVLAN, + InfobloxVLANView, +) +from nautobot_ssot.integrations.infoblox.utils.client import get_default_ext_attrs +from nautobot_ssot.integrations.infoblox.utils.diffsync import ( + build_vlan_map, + get_ext_attr_dict, + map_network_view_to_namespace, ) +class AdapterLoadException(Exception): + """Raised when there's an error while loading data.""" + + class InfobloxAdapter(DiffSync): """DiffSync adapter using requests to communicate to Infoblox server.""" + namespace = InfobloxNamespace prefix = InfobloxNetwork ipaddress = InfobloxIPAddress vlangroup = InfobloxVLANView vlan = InfobloxVLAN + dnshostrecord = InfobloxDnsHostRecord + dnsarecord = InfobloxDnsARecord 
+ dnsptrrecord = InfobloxDnsPTRRecord - top_level = ["vlangroup", "vlan", "prefix", "ipaddress"] + top_level = [ + "namespace", + "vlangroup", + "vlan", + "prefix", + "ipaddress", + "dnshostrecord", + "dnsarecord", + "dnsptrrecord", + ] - def __init__(self, *args, job=None, sync=None, conn, **kwargs): + def __init__(self, *args, job=None, sync=None, conn, config, **kwargs): """Initialize Infoblox. Args: job (object, optional): Infoblox job. Defaults to None. sync (object, optional): Infoblox DiffSync. Defaults to None. conn (object): InfobloxAPI connection. + config (object): Infoblox config object. """ super().__init__(*args, **kwargs) self.job = job self.sync = sync self.conn = conn + self.config = config + self.excluded_attrs = config.cf_fields_ignore.get("extensible_attributes", []) self.subnets = [] if self.conn in [None, False]: @@ -47,41 +77,128 @@ def __init__(self, *args, job=None, sync=None, conn, **kwargs): ) raise PluginImproperlyConfigured - def load_prefixes(self): - """Load InfobloxNetwork DiffSync model.""" - if PLUGIN_CFG.get("infoblox_import_subnets"): - subnets = [] - containers = [] - for prefix in PLUGIN_CFG["infoblox_import_subnets"]: - # Get all child containers and subnets - tree = self.conn.get_tree_from_container(prefix) - containers.extend(tree) - - # Need to check if the container has children. 
If it does, we need to get all subnets from the children - # If it doesn't, we can just get all subnets from the container - if tree: - for subnet in tree: - subnets.extend(self.conn.get_child_subnets_from_container(prefix=subnet["network"])) - else: - subnets.extend(self.conn.get_all_subnets(prefix=prefix)) - - # Remove duplicates if a child subnet is included infoblox_import_subnets config - subnets = self.conn.remove_duplicates(subnets) - all_networks = self.conn.remove_duplicates(containers) + subnets - else: - # Need to load containers here to prevent duplicates when syncing back to Infoblox - containers = self.conn.get_network_containers() - subnets = self.conn.get_all_subnets() - if PLUGIN_CFG.get("infoblox_import_objects_subnets_ipv6"): - containers += self.conn.get_network_containers(ipv6=True) - subnets += self.conn.get_all_subnets(ipv6=True) - all_networks = containers + subnets + def load_network_views(self, sync_filters: list): + """Load Namespace DiffSync model. + + Args: + sync_filters (list): Sync filters containing sync rules + """ + if self.job.debug: + self.job.logger.debug("Loading Network Views from Infoblox.") + network_view_filters = {sf["network_view"] for sf in sync_filters if "network_view" in sf} + try: + networkviews = self.conn.get_network_views() + except requests.exceptions.HTTPError as err: + self.job.logger.error(f"Error while loading network views: {str(err)}") + raise AdapterLoadException(str(err)) from err + + default_ext_attrs = get_default_ext_attrs(review_list=networkviews, excluded_attrs=self.excluded_attrs) + for _nv in networkviews: + # Do not load Network Views not present in the sync filters + if _nv["name"] not in network_view_filters: + continue + namespace_name = map_network_view_to_namespace(value=_nv["name"], direction="nv_to_ns") + networkview_ext_attrs = get_ext_attr_dict( + extattrs=_nv.get("extattrs", {}), excluded_attrs=self.excluded_attrs + ) + new_namespace = self.namespace( + name=namespace_name, + 
ext_attrs={**default_ext_attrs, **networkview_ext_attrs}, + ) + self.add(new_namespace) + + def _load_prefixes_filtered(self, sync_filter: dict, ip_version: str = "ipv4"): + """Loads prefixes from Infoblox based on the provided sync filter. + + Args: + sync_filter (dict): Sync filter containing sync rules + ip_version (str): IP version of prefixes, either "ipv4" or "ipv6" + + Returns: + (tuple): Tuple consisting of list of container prefixes and a list of subnet prefixes + """ + containers = [] + subnets = [] + prefix_filter_attr = f"prefixes_{ip_version}" + network_view = sync_filter["network_view"] + + for prefix in sync_filter[prefix_filter_attr]: + tree = self.conn.get_tree_from_container(root_container=prefix, network_view=network_view) + containers.extend(tree) + # Need to check if the container has children. If it does, we need to get all subnets from the children + # If it doesn't, we can just get all subnets from the container + if tree: + for subnet in tree: + subnets.extend( + self.conn.get_child_subnets_from_container(prefix=subnet["network"], network_view=network_view) + ) + else: + subnets.extend(self.conn.get_all_subnets(prefix=prefix, network_view=network_view)) + + return containers, subnets + + def _load_all_prefixes_filtered(self, sync_filters: list, include_ipv4: bool, include_ipv6: bool): + """Loads all of the Infoblox prefixes based on the sync filter rules. 
+ + Args: + sync_filters (list): List of dicts, each dict is a single sync filter definition + include_ipv4 (bool): Whether to include IPv4 prefixes + include_ipv6 (bool): Whether to include IPv6 prefixes + + Returns: + (tuple): Tuple consisting of list of container prefixes and a list of subnet prefixes + """ + all_containers = [] + all_subnets = [] + for sync_filter in sync_filters: + pfx_filter_ipv4 = "prefixes_ipv4" in sync_filter + pfx_filter_ipv6 = "prefixes_ipv6" in sync_filter + if pfx_filter_ipv4 and include_ipv4: + containers, subnets = self._load_prefixes_filtered(sync_filter=sync_filter, ip_version="ipv4") + all_containers.extend(containers) + all_subnets.extend(subnets) + if pfx_filter_ipv6 and include_ipv6: + containers, subnets = self._load_prefixes_filtered(sync_filter=sync_filter, ip_version="ipv6") + all_subnets.extend(subnets) + all_containers.extend(containers) + # Load all prefixes from a network view if there are no prefix filters + if "network_view" in sync_filter and not (pfx_filter_ipv4 or pfx_filter_ipv6): + network_view = sync_filter["network_view"] + if include_ipv4: + all_containers.extend(self.conn.get_network_containers(network_view=network_view)) + all_subnets.extend(self.conn.get_all_subnets(network_view=network_view)) + if include_ipv6: + all_containers.extend(self.conn.get_network_containers(network_view=network_view, ipv6=True)) + all_subnets.extend(self.conn.get_all_subnets(network_view=network_view, ipv6=True)) + + return all_containers, all_subnets + + def load_prefixes(self, include_ipv4: bool, include_ipv6: bool, sync_filters: list): + """Load InfobloxNetwork DiffSync model. 
+ + Args: + sync_filters (list): List of dicts, each dict is a single sync filter definition + include_ipv4 (bool): Whether to include IPv4 prefixes + include_ipv6 (bool): Whether to include IPv6 prefixes + """ + if self.job.debug: + self.job.logger.debug("Loading Subnets from Infoblox.") + try: + containers, subnets = self._load_all_prefixes_filtered( + sync_filters=sync_filters, include_ipv4=include_ipv4, include_ipv6=include_ipv6 + ) + except requests.exceptions.HTTPError as err: + self.job.logger.error(f"Error while loading prefixes: {str(err)}") + raise AdapterLoadException(str(err)) from err + + all_networks = containers + subnets self.subnets = [(x["network"], x["network_view"]) for x in subnets] - default_ext_attrs = get_default_ext_attrs(review_list=all_networks) + default_ext_attrs = get_default_ext_attrs(review_list=all_networks, excluded_attrs=self.excluded_attrs) for _pf in all_networks: - pf_ext_attrs = get_ext_attr_dict(extattrs=_pf.get("extattrs", {})) + pf_ext_attrs = get_ext_attr_dict(extattrs=_pf.get("extattrs", {}), excluded_attrs=self.excluded_attrs) new_pf = self.prefix( network=_pf["network"], + namespace=map_network_view_to_namespace(value=_pf["network_view"], direction="nv_to_ns"), description=_pf.get("comment", ""), network_type="network" if _pf in subnets else "container", ext_attrs={**default_ext_attrs, **pf_ext_attrs}, @@ -93,40 +210,182 @@ def load_prefixes(self): try: self.add(new_pf) except ObjectAlreadyExists: - self.job.logger.warning( - f"Duplicate prefix found: {new_pf}. Duplicate prefixes are not supported, " - "and only the first occurrence will be included in the sync. To load data " - "from a single Network View, use the 'infoblox_network_view' setting." 
- ) + self.job.logger.warning(f"Duplicate prefix found: {new_pf}.") - def load_ipaddresses(self): + def load_ipaddresses(self): # pylint: disable=too-many-branches,too-many-locals,too-many-statements """Load InfobloxIPAddress DiffSync model.""" - ipaddrs = self.conn.get_all_ipv4address_networks(prefixes=self.subnets) - default_ext_attrs = get_default_ext_attrs(review_list=ipaddrs) + if self.job.debug: + self.job.logger.debug("Loading IP addresses from Infoblox.") + try: + ipaddrs = self.conn.get_all_ipv4address_networks(prefixes=self.subnets) + except requests.exceptions.HTTPError as err: + self.job.logger.error(f"Error while loading IP addresses: {str(err)}") + raise AdapterLoadException(str(err)) from err + + default_ext_attrs = get_default_ext_attrs(review_list=ipaddrs, excluded_attrs=self.excluded_attrs) for _ip in ipaddrs: _, prefix_length = _ip["network"].split("/") - dns_name = "" - if _ip["names"]: - dns_name = get_dns_name(possible_fqdn=_ip["names"][0]) - ip_ext_attrs = get_ext_attr_dict(extattrs=_ip.get("extattrs", {})) + network_view = _ip["network_view"] + namespace = map_network_view_to_namespace(value=network_view, direction="nv_to_ns") + + ip_ext_attrs = get_ext_attr_dict(extattrs=_ip.get("extattrs", {}), excluded_attrs=self.excluded_attrs) new_ip = self.ipaddress( address=_ip["ip_address"], prefix=_ip["network"], prefix_length=prefix_length, - dns_name=dns_name, + namespace=namespace, status=self.conn.get_ipaddr_status(_ip), - ip_addr_type=self.conn.get_ipaddr_type(_ip), - description=_ip["comment"], + description="", + ip_addr_type="host", ext_attrs={**default_ext_attrs, **ip_ext_attrs}, + mac_address="" if not _ip["mac_address"] else _ip["mac_address"], + fixed_address_comment="", ) - self.add(new_ip) + + # Record references to DNS Records linked to this IP Address. + # Field `comment` in IP Address records can come from linked fixed address or DNS record. + # We add extra logic to tell DNS record and fixed address comments apart. 
+ for ref in _ip["objects"]: + obj_type = ref.split("/")[0] + if obj_type == "record:host": + new_ip.has_host_record = True + host_record_ref = ref + elif obj_type == "record:a": + new_ip.has_a_record = True + a_record_ref = ref + elif obj_type == "record:ptr": + new_ip.has_ptr_record = True + ptr_record_ref = ref + # We currently only support RESERVED and MAC_ADDRESS types for fixed address objects. + elif obj_type == "fixedaddress": + if "RESERVATION" in _ip["types"]: + new_ip.fixed_address_type = "RESERVED" + new_ip.has_fixed_address = True + new_ip.fixed_address_ref = ref + elif "FA" in _ip["types"]: + new_ip.fixed_address_type = "MAC_ADDRESS" + new_ip.has_fixed_address = True + new_ip.fixed_address_ref = ref + + # We use Nautobot IP Address description for Infoblox Fixed Address name + if new_ip.has_fixed_address: + fixed_address = self.conn.get_fixed_address_by_ref(new_ip.fixed_address_ref) + new_ip.description = fixed_address.get("name") or "" + new_ip.fixed_address_comment = fixed_address.get("comment") or "" + + # Default type is `host` but fixed address records must be `dhcp` + if ( + new_ip.has_fixed_address + and self.config.fixed_address_type != FixedAddressTypeChoices.DONT_CREATE_RECORD + ): + new_ip.ip_addr_type = "dhcp" + + # Load individual DNS records + if new_ip.has_a_record: + self._load_dns_a_record_for_ip(ref=a_record_ref, ip_record=new_ip, namespace=namespace) + if new_ip.has_host_record: + self._load_dns_host_record_for_ip(ref=host_record_ref, ip_record=new_ip, namespace=namespace) + if new_ip.has_ptr_record: + self._load_dns_ptr_record_for_ip(ref=ptr_record_ref, ip_record=new_ip, namespace=namespace) + + if new_ip.has_fixed_address or new_ip.has_a_record or new_ip.has_host_record: + self.add(new_ip) + + def _load_dns_host_record_for_ip(self, ref: str, ip_record: object, namespace: str): + """Load the DNS Host record. 
+
+        Args:
+            ref (str): Host record reference
+            ip_record (object): Parent IP Address record
+            namespace (str): Namespace of this record
+        """
+        host_record = self.conn.get_host_record_by_ref(ref)
+        record_ext_attrs = get_ext_attr_dict(
+            extattrs=host_record.get("extattrs", {}), excluded_attrs=self.excluded_attrs
+        )
+
+        new_host_record = self.dnshostrecord(
+            address=ip_record.address,
+            prefix=ip_record.prefix,
+            prefix_length=ip_record.prefix_length,
+            namespace=namespace,
+            dns_name=host_record["name"],
+            ip_addr_type=ip_record.ip_addr_type,
+            description=host_record.get("comment") or "",
+            status=ip_record.status,
+            ext_attrs=record_ext_attrs,
+            ref=ref,
+        )
+
+        self.add(new_host_record)
+
+    def _load_dns_a_record_for_ip(self, ref: str, ip_record: object, namespace: str):
+        """Load the DNS A record.
+
+        Args:
+            ref (str): A record reference
+            ip_record (object): Parent IP Address record
+            namespace (str): Namespace of this record
+        """
+        a_record = self.conn.get_a_record_by_ref(ref)
+        record_ext_attrs = get_ext_attr_dict(extattrs=a_record.get("extattrs", {}), excluded_attrs=self.excluded_attrs)
+
+        new_a_record = self.dnsarecord(
+            address=a_record["ipv4addr"],
+            prefix=ip_record.prefix,
+            prefix_length=ip_record.prefix_length,
+            namespace=namespace,
+            dns_name=a_record["name"],
+            ip_addr_type=ip_record.ip_addr_type,
+            description=a_record.get("comment") or "",
+            status=ip_record.status,
+            ext_attrs=record_ext_attrs,
+            ref=ref,
+        )
+
+        self.add(new_a_record)
+
+    def _load_dns_ptr_record_for_ip(self, ref: str, ip_record: object, namespace: str):
+        """Load the DNS PTR record.
+ + Args: + ref (list): PTR record reference + ip_record (object): Parent IP Address record + namespace (str): Namespace of this record + """ + ptr_record = self.conn.get_ptr_record_by_ref(ref) + record_ext_attrs = get_ext_attr_dict( + extattrs=ptr_record.get("extattrs", {}), excluded_attrs=self.excluded_attrs + ) + + new_ptr_record = self.dnsptrrecord( + address=ptr_record["ipv4addr"], + prefix=ip_record.prefix, + prefix_length=ip_record.prefix_length, + namespace=namespace, + dns_name=ptr_record["ptrdname"], + ip_addr_type=ip_record.ip_addr_type, + description=ptr_record.get("comment") or "", + status=ip_record.status, + ext_attrs=record_ext_attrs, + ref=ref, + ) + + self.add(new_ptr_record) def load_vlanviews(self): """Load InfobloxVLANView DiffSync model.""" - vlanviews = self.conn.get_vlanviews() - default_ext_attrs = get_default_ext_attrs(review_list=vlanviews) + if self.job.debug: + self.job.logger.debug("Loading VLAN Views from Infoblox.") + try: + vlanviews = self.conn.get_vlanviews() + except requests.exceptions.HTTPError as err: + self.job.logger.error(f"Error while loading VLAN views: {str(err)}") + raise AdapterLoadException(str(err)) from err + + default_ext_attrs = get_default_ext_attrs(review_list=vlanviews, excluded_attrs=self.excluded_attrs) for _vv in vlanviews: - vv_ext_attrs = get_ext_attr_dict(extattrs=_vv.get("extattrs", {})) + vv_ext_attrs = get_ext_attr_dict(extattrs=_vv.get("extattrs", {}), excluded_attrs=self.excluded_attrs) new_vv = self.vlangroup( name=_vv["name"], description=_vv["comment"] if _vv.get("comment") else "", @@ -136,10 +395,17 @@ def load_vlanviews(self): def load_vlans(self): """Load InfobloxVlan DiffSync model.""" - vlans = self.conn.get_vlans() - default_ext_attrs = get_default_ext_attrs(review_list=vlans) + if self.job.debug: + self.job.logger.debug("Loading VLANs from Infoblox.") + try: + vlans = self.conn.get_vlans() + except requests.exceptions.HTTPError as err: + self.job.logger.error(f"Error while loading VLANs: 
{str(err)}") + raise AdapterLoadException(str(err)) from err + + default_ext_attrs = get_default_ext_attrs(review_list=vlans, excluded_attrs=self.excluded_attrs) for _vlan in vlans: - vlan_ext_attrs = get_ext_attr_dict(extattrs=_vlan.get("extattrs", {})) + vlan_ext_attrs = get_ext_attr_dict(extattrs=_vlan.get("extattrs", {}), excluded_attrs=self.excluded_attrs) vlan_group = re.search(r"(?:.+\:)(\S+)(?:\/\S+\/.+)", _vlan["_ref"]) new_vlan = self.vlan( name=_vlan["name"], @@ -153,22 +419,20 @@ def load_vlans(self): def load(self): """Load all models by calling other methods.""" - if "infoblox_import_objects" in PLUGIN_CFG: - if PLUGIN_CFG["infoblox_import_objects"].get("subnets"): - self.load_prefixes() - if PLUGIN_CFG["infoblox_import_objects"].get("ip_addresses"): - self.load_ipaddresses() - if PLUGIN_CFG["infoblox_import_objects"].get("vlan_views"): - self.load_vlanviews() - if PLUGIN_CFG["infoblox_import_objects"].get("vlans"): - self.load_vlans() - else: - self.job.logger.info("The `infoblox_import_objects` setting was not found so all objects will be imported.") - self.load_prefixes() + include_ipv4 = self.config.import_ipv4 + include_ipv6 = self.config.import_ipv6 + sync_filters = self.config.infoblox_sync_filters + + self.load_network_views(sync_filters=sync_filters) + if self.config.import_subnets: + self.load_prefixes(include_ipv4=include_ipv4, include_ipv6=include_ipv6, sync_filters=sync_filters) + if self.config.import_ip_addresses: self.load_ipaddresses() + if self.config.import_vlan_views: self.load_vlanviews() + if self.config.import_vlans: self.load_vlans() - for obj in ["prefix", "ipaddress", "vlangroup", "vlan"]: + for obj in ["namespace", "prefix", "ipaddress", "vlangroup", "vlan"]: if obj in self.dict(): self.job.logger.info(f"Loaded {len(self.dict()[obj])} {obj} from Infoblox.") diff --git a/nautobot_ssot/integrations/infoblox/diffsync/adapters/nautobot.py b/nautobot_ssot/integrations/infoblox/diffsync/adapters/nautobot.py index 
682c40af2..95d857070 100644 --- a/nautobot_ssot/integrations/infoblox/diffsync/adapters/nautobot.py +++ b/nautobot_ssot/integrations/infoblox/diffsync/adapters/nautobot.py @@ -2,24 +2,35 @@ # pylint: disable=duplicate-code import datetime +from typing import Optional + from diffsync import DiffSync from diffsync.exceptions import ObjectAlreadyExists, ObjectNotFound from django.contrib.contenttypes.models import ContentType from nautobot.dcim.models import Location from nautobot.extras.choices import CustomFieldTypeChoices -from nautobot.extras.models import Relationship, Role, Status, Tag, CustomField -from nautobot.ipam.models import IPAddress, Prefix, VLAN, VLANGroup +from nautobot.extras.models import CustomField, Relationship, Role, Status, Tag +from nautobot.ipam.choices import IPAddressTypeChoices +from nautobot.ipam.models import VLAN, IPAddress, Namespace, Prefix, VLANGroup from nautobot.tenancy.models import Tenant + +from nautobot_ssot.integrations.infoblox.choices import DNSRecordTypeChoices, FixedAddressTypeChoices +from nautobot_ssot.integrations.infoblox.constant import TAG_COLOR from nautobot_ssot.integrations.infoblox.diffsync.models import ( - NautobotNetwork, + NautobotDnsARecord, + NautobotDnsHostRecord, + NautobotDnsPTRRecord, NautobotIPAddress, - NautobotVlanGroup, + NautobotNamespace, + NautobotNetwork, NautobotVlan, + NautobotVlanGroup, ) -from nautobot_ssot.integrations.infoblox.constant import TAG_COLOR from nautobot_ssot.integrations.infoblox.utils.diffsync import ( - nautobot_vlan_status, get_default_custom_fields, + get_valid_custom_fields, + map_network_view_to_namespace, + nautobot_vlan_status, ) from nautobot_ssot.integrations.infoblox.utils.nautobot import build_vlan_map_from_relations, get_prefix_vlans @@ -89,34 +100,42 @@ def _tag_object(nautobot_object): class NautobotAdapter(NautobotMixin, DiffSync): # pylint: disable=too-many-instance-attributes """DiffSync adapter using ORM to communicate to Nautobot.""" + namespace = 
NautobotNamespace prefix = NautobotNetwork ipaddress = NautobotIPAddress vlangroup = NautobotVlanGroup vlan = NautobotVlan + dnshostrecord = NautobotDnsHostRecord + dnsarecord = NautobotDnsARecord + dnsptrrecord = NautobotDnsPTRRecord - top_level = ["vlangroup", "vlan", "prefix", "ipaddress"] + top_level = ["namespace", "vlangroup", "vlan", "prefix", "ipaddress", "dnshostrecord", "dnsarecord", "dnsptrrecord"] status_map = {} location_map = {} relationship_map = {} tenant_map = {} vrf_map = {} + namespace_map = {} prefix_map = {} role_map = {} ipaddr_map = {} vlan_map = {} vlangroup_map = {} - def __init__(self, *args, job=None, sync=None, **kwargs): + def __init__(self, *args, job=None, sync=None, config, **kwargs): """Initialize Nautobot. Args: job (object, optional): Nautobot job. Defaults to None. sync (object, optional): Nautobot DiffSync. Defaults to None. + config (object): Infoblox config object. """ super().__init__(*args, **kwargs) self.job = job self.sync = sync + self.config = config + self.excluded_cfs = config.cf_fields_ignore.get("custom_fields", []) def sync_complete(self, source: DiffSync, *args, **kwargs): """Process object creations/updates using bulk operations. @@ -126,24 +145,117 @@ def sync_complete(self, source: DiffSync, *args, **kwargs): """ super().sync_complete(source, *args, **kwargs) - def load_prefixes(self): - """Load Prefixes from Nautobot.""" - all_prefixes = Prefix.objects.all() - default_cfs = get_default_custom_fields(cf_contenttype=ContentType.objects.get_for_model(Prefix)) + def _get_namespaces_from_sync_filters(self, sync_filters: list) -> set: + """Get namespaces defined in filters. 
+ + Args: + sync_filters (list): Sync filters containing sync rules + """ + namespaces = set() + for sync_filter in sync_filters: + namespace_name = map_network_view_to_namespace(value=sync_filter["network_view"], direction="nv_to_ns") + namespaces.add(namespace_name) + + return namespaces + + def load_namespaces(self, sync_filters: Optional[list] = None): + """Load Namespace DiffSync model. + + Args: + sync_filters (list): Sync filters containing sync rules + """ + if self.job.debug: + self.job.logger.debug("Loading Namespaces from Nautobot.") + namespace_names = None + if sync_filters: + namespace_names = self._get_namespaces_from_sync_filters(sync_filters) + if namespace_names: + all_namespaces = Namespace.objects.filter(name__in=namespace_names) + else: + all_namespaces = Namespace.objects.all() + + default_cfs = get_default_custom_fields( + cf_contenttype=ContentType.objects.get_for_model(Namespace), excluded_cfs=self.excluded_cfs + ) + for namespace in all_namespaces: + self.namespace_map[namespace.name] = namespace.id + custom_fields = get_valid_custom_fields(namespace.custom_field_data, excluded_cfs=self.excluded_cfs) + _namespace = self.namespace( + name=namespace.name, + ext_attrs={**default_cfs, **custom_fields}, + pk=namespace.id, + ) + try: + self.add(_namespace) + except ObjectAlreadyExists: + self.job.logger.warning(f"Found duplicate namespace: {namespace.name}.") + + def _load_all_prefixes_filtered(self, sync_filters: list, include_ipv4: bool, include_ipv6: bool): + """Loads prefixes from Nautobot based on the provided sync filter. 
+ + Args: + sync_filter (dict): Sync filter containing sync rules + include_ipv4 (bool): Whether to include IPv4 prefixes + include_ipv6 (bool): Whether to include IPv6 prefixes + + Returns: + (PrefixQuerySet): PrefixQuerySet with prefixes + """ + all_prefixes = Prefix.objects.none() + for sync_filter in sync_filters: + query_filters = {} + if "network_view" in sync_filter: + namespace = map_network_view_to_namespace(sync_filter["network_view"], direction="nv_to_ns") + query_filters["namespace__name"] = namespace + if "prefixes_ipv4" in sync_filter and include_ipv4: + for pfx_ipv4 in sync_filter["prefixes_ipv4"]: + query_filters["network__net_contained_or_equal"] = pfx_ipv4 + all_prefixes = all_prefixes.union(Prefix.objects.filter(**query_filters)) + if "prefixes_ipv6" in sync_filter and include_ipv6: + for pfx_ipv6 in sync_filter["prefixes_ipv6"]: + query_filters["network__net_contained_or_equal"] = pfx_ipv6 + all_prefixes = all_prefixes.union(Prefix.objects.filter(**query_filters)) + # Filter on namespace name only + if "prefixes_ipv4" not in sync_filter and "prefixes_ipv6" not in sync_filter: + if include_ipv4 and not include_ipv6: + query_filters["ip_version"] = 4 + elif include_ipv6 and not include_ipv4: + query_filters["ip_version"] = 6 + all_prefixes = all_prefixes.union(Prefix.objects.filter(**query_filters)) + + return all_prefixes + + def load_prefixes(self, include_ipv4: bool, include_ipv6: bool, sync_filters: list): + """Load Prefixes from Nautobot. 
+ + Args: + sync_filters (list): List of dicts, each dict is a single sync filter definition + include_ipv4 (bool): Whether to include IPv4 prefixes + include_ipv6 (bool): Whether to include IPv6 prefixes + """ + if self.job.debug: + self.job.logger.debug("Loading Prefixes from Nautobot.") + all_prefixes = self._load_all_prefixes_filtered( + sync_filters=sync_filters, include_ipv4=include_ipv4, include_ipv6=include_ipv6 + ) + + default_cfs = get_default_custom_fields( + cf_contenttype=ContentType.objects.get_for_model(Prefix), excluded_cfs=self.excluded_cfs + ) for prefix in all_prefixes: - self.prefix_map[str(prefix.prefix)] = prefix.id - if "ssot_synced_to_infoblox" in prefix.custom_field_data: - prefix.custom_field_data.pop("ssot_synced_to_infoblox") + self.prefix_map[(prefix.namespace.name), str(prefix.prefix)] = prefix.id + dhcp_ranges = prefix.cf.get("dhcp_ranges") current_vlans = get_prefix_vlans(prefix=prefix) + custom_fields = get_valid_custom_fields(prefix.custom_field_data, excluded_cfs=self.excluded_cfs) _prefix = self.prefix( network=str(prefix.prefix), + namespace=prefix.namespace.name, description=prefix.description, network_type=prefix.type, - ext_attrs={**default_cfs, **prefix.custom_field_data}, + ext_attrs={**default_cfs, **custom_fields}, vlans=build_vlan_map_from_relations(vlans=current_vlans), pk=prefix.id, ) - dhcp_ranges = prefix.cf.get("dhcp_ranges") if dhcp_ranges: _prefix.ranges = dhcp_ranges.split(",") try: @@ -151,19 +263,68 @@ def load_prefixes(self): except ObjectAlreadyExists: self.job.logger.warning(f"Found duplicate prefix: {prefix.prefix}.") - def load_ipaddresses(self): - """Load IP Addresses from Nautobot.""" - default_cfs = get_default_custom_fields(cf_contenttype=ContentType.objects.get_for_model(IPAddress)) - for ipaddr in IPAddress.objects.all(): - self.ipaddr_map[str(ipaddr.address)] = ipaddr.id + def _load_all_ipaddresses_filtered(self, sync_filters: list, include_ipv4: bool, include_ipv6: bool): + """Loads ip addresses 
from Nautobot based on the provided sync filter. + + Args: + sync_filter (dict): Sync filter containing sync rules + include_ipv4 (bool): Whether to include IPv4 addresses + include_ipv6 (bool): Whether to include IPv6 addresses + + Returns: + (IPAddressQuerySet): IPAddressQuerySet with ip addresses + """ + all_ipaddresses = IPAddress.objects.none() + for sync_filter in sync_filters: + query_filters = {} + if "network_view" in sync_filter: + namespace = map_network_view_to_namespace(sync_filter["network_view"], direction="nv_to_ns") + query_filters["parent__namespace__name"] = namespace + if "prefixes_ipv4" in sync_filter and include_ipv4: + query_filters["host__net_in"] = sync_filter["prefixes_ipv4"] + all_ipaddresses = all_ipaddresses.union(IPAddress.objects.filter(**query_filters)) + if "prefixes_ipv6" in sync_filter and include_ipv6: + query_filters["host__net_in"] = sync_filter["prefixes_ipv6"] + all_ipaddresses = all_ipaddresses.union(IPAddress.objects.filter(**query_filters)) + # Filter on namespace name only + if "prefixes_ipv4" not in sync_filter and "prefixes_ipv6" not in sync_filter: + if include_ipv4 and not include_ipv6: + query_filters["ip_version"] = 4 + elif include_ipv6 and not include_ipv4: + query_filters["ip_version"] = 6 + all_ipaddresses = all_ipaddresses.union(IPAddress.objects.filter(**query_filters)) + + return all_ipaddresses + + def load_ipaddresses( + self, include_ipv4: bool, include_ipv6: bool, sync_filters: list + ): # pylint: disable=too-many-branches + """Load IP Addresses from Nautobot. 
+ + Args: + sync_filters (list): List of dicts, each dict is a single sync filter definition + include_ipv4 (bool): Whether to include IPv4 IP addresses + include_ipv6 (bool): Whether to include IPv6 addresses + """ + if self.job.debug: + self.job.logger.debug("Loading IP Addresses from Nautobot.") + default_cfs = get_default_custom_fields( + cf_contenttype=ContentType.objects.get_for_model(IPAddress), excluded_cfs=self.excluded_cfs + ) + all_ipaddresses = self._load_all_ipaddresses_filtered( + sync_filters=sync_filters, include_ipv4=include_ipv4, include_ipv6=include_ipv6 + ) + for ipaddr in all_ipaddresses: addr = ipaddr.host - # the last Prefix is the most specific and is assumed the one the IP address resides in - prefix = Prefix.objects.net_contains(addr).last() + prefix = ipaddr.parent # The IP address must have a parent prefix + # Note: In Nautobot 2.0 IP Address *must* have a parent prefix so this should not happen if not prefix: self.job.logger.warning(f"IP Address {addr} does not have a parent prefix and will not be synced.") + self.ipaddr_map[str(ipaddr.address), "Global"] = ipaddr.id continue + self.ipaddr_map[str(ipaddr.address), prefix.namespace.name] = ipaddr.id # IP address must be part of a prefix that is not a container # This means the IP cannot be associated with an IPv4 Network within Infoblox if prefix.type == "container": @@ -172,77 +333,207 @@ def load_ipaddresses(self): ) continue - if "ssot_synced_to_infoblox" in ipaddr.custom_field_data: - ipaddr.custom_field_data.pop("ssot_synced_to_infoblox") + # Infoblox fixed address records are of type DHCP. Only Nautobot IP addresses of type DHCP will trigger fixed address creation logic. 
+ has_fixed_address = False + mac_address = ipaddr.custom_field_data.get("mac_address") or "" + if ipaddr.type == IPAddressTypeChoices.TYPE_DHCP: + if self.config.fixed_address_type == FixedAddressTypeChoices.MAC_ADDRESS and mac_address: + has_fixed_address = True + elif self.config.fixed_address_type == FixedAddressTypeChoices.RESERVED: + has_fixed_address = True + + # Description is used to derive name of the fixed record + if self.config.fixed_address_type != FixedAddressTypeChoices.DONT_CREATE_RECORD: + description = ipaddr.description + else: + description = "" + + custom_fields = get_valid_custom_fields(ipaddr.custom_field_data, excluded_cfs=self.excluded_cfs) _ip = self.ipaddress( address=addr, prefix=str(prefix), + namespace=prefix.namespace.name, status=ipaddr.status.name if ipaddr.status else None, ip_addr_type=ipaddr.type, prefix_length=prefix.prefix_length if prefix else ipaddr.prefix_length, - dns_name=ipaddr.dns_name, - description=ipaddr.description, - ext_attrs={**default_cfs, **ipaddr.custom_field_data}, + description=description, + ext_attrs={**default_cfs, **custom_fields}, + mac_address=mac_address, pk=ipaddr.id, + has_fixed_address=has_fixed_address, + # Only set fixed address comment if we create fixed addresses. + fixed_address_comment=( + ipaddr.custom_field_data.get("fixed_address_comment") or "" if has_fixed_address else "" + ), ) + + # Pretend IP Address has matching DNS records if `dns_name` is defined. + # This will be compared against values set on Infoblox side. 
+ if ipaddr.dns_name: + if self.config.dns_record_type == DNSRecordTypeChoices.HOST_RECORD: + _ip.has_host_record = True + self._load_dns_host_record_for_ip( + ip_record=_ip, dns_name=ipaddr.dns_name, cfs=ipaddr.custom_field_data + ) + elif self.config.dns_record_type == DNSRecordTypeChoices.A_RECORD: + _ip.has_a_record = True + self._load_dns_a_record_for_ip( + ip_record=_ip, dns_name=ipaddr.dns_name, cfs=ipaddr.custom_field_data + ) + elif self.config.dns_record_type == DNSRecordTypeChoices.A_AND_PTR_RECORD: + _ip.has_a_record = True + _ip.has_ptr_record = True + self._load_dns_ptr_record_for_ip( + ip_record=_ip, dns_name=ipaddr.dns_name, cfs=ipaddr.custom_field_data + ) + self._load_dns_a_record_for_ip( + ip_record=_ip, dns_name=ipaddr.dns_name, cfs=ipaddr.custom_field_data + ) + try: self.add(_ip) except ObjectAlreadyExists: self.job.logger.warning(f"Duplicate IP Address detected: {addr}.") + def _load_dns_host_record_for_ip(self, ip_record: NautobotIPAddress, dns_name: str, cfs: dict): + """Load the DNS Host record. + + Args: + ip_record (NautobotIPAddress): Parent IP Address record + dns_name (str): DNS Name + cfs (dict): Custom fields + """ + new_host_record = self.dnshostrecord( + address=ip_record.address, + prefix=ip_record.prefix, + prefix_length=ip_record.prefix_length, + namespace=ip_record.namespace, + dns_name=dns_name, + ip_addr_type=ip_record.ip_addr_type, + description=cfs.get("dns_host_record_comment") or "", + status=ip_record.status, + ext_attrs=ip_record.ext_attrs, + pk=ip_record.pk, + ) + + self.add(new_host_record) + + def _load_dns_a_record_for_ip(self, ip_record: NautobotIPAddress, dns_name: str, cfs: dict): + """Load the DNS A record. 
+ + Args: + ip_record (NautobotIPAddress): Parent IP Address record + dns_name (str): DNS Name + cfs (dict): Custom fields + """ + new_a_record = self.dnsarecord( + address=ip_record.address, + prefix=ip_record.prefix, + prefix_length=ip_record.prefix_length, + namespace=ip_record.namespace, + dns_name=dns_name, + ip_addr_type=ip_record.ip_addr_type, + description=cfs.get("dns_a_record_comment") or "", + status=ip_record.status, + ext_attrs=ip_record.ext_attrs, + pk=ip_record.pk, + ) + + self.add(new_a_record) + + def _load_dns_ptr_record_for_ip(self, ip_record: NautobotIPAddress, dns_name: str, cfs: dict): + """Load the DNS PTR record. + + Args: + ip_record (NautobotIPAddress): Parent IP Address record + dns_name (str): DNS Name + cfs (dict): Custom fields + """ + new_ptr_record = self.dnsptrrecord( + address=ip_record.address, + prefix=ip_record.prefix, + prefix_length=ip_record.prefix_length, + namespace=ip_record.namespace, + dns_name=dns_name, + ip_addr_type=ip_record.ip_addr_type, + description=cfs.get("dns_ptr_record_comment") or "", + status=ip_record.status, + ext_attrs=ip_record.ext_attrs, + pk=ip_record.pk, + ) + + self.add(new_ptr_record) + def load_vlangroups(self): """Load VLAN Groups from Nautobot.""" - default_cfs = get_default_custom_fields(cf_contenttype=ContentType.objects.get_for_model(VLANGroup)) + if self.job.debug: + self.job.logger.debug("Loading VLAN Groups from Nautobot.") + default_cfs = get_default_custom_fields( + cf_contenttype=ContentType.objects.get_for_model(VLANGroup), excluded_cfs=self.excluded_cfs + ) for grp in VLANGroup.objects.all(): self.vlangroup_map[grp.name] = grp.id - if "ssot_synced_to_infoblox" in grp.custom_field_data: - grp.custom_field_data.pop("ssot_synced_to_infoblox") + custom_fields = get_valid_custom_fields(grp.custom_field_data, excluded_cfs=self.excluded_cfs) _vg = self.vlangroup( name=grp.name, description=grp.description, - ext_attrs={**default_cfs, **grp.custom_field_data}, + ext_attrs={**default_cfs, 
**custom_fields}, pk=grp.id, ) self.add(_vg) def load_vlans(self): """Load VLANs from Nautobot.""" - default_cfs = get_default_custom_fields(cf_contenttype=ContentType.objects.get_for_model(VLAN)) + if self.job.debug: + self.job.logger.debug("Loading VLANs from Nautobot.") + default_cfs = get_default_custom_fields( + cf_contenttype=ContentType.objects.get_for_model(VLAN), excluded_cfs=self.excluded_cfs + ) # To ensure we are only dealing with VLANs imported from Infoblox we need to filter to those with a # VLAN Group assigned to match how Infoblox requires a VLAN View to be associated to VLANs. for vlan in VLAN.objects.filter(vlan_group__isnull=False): if vlan.vlan_group.name not in self.vlan_map: self.vlan_map[vlan.vlan_group.name] = {} self.vlan_map[vlan.vlan_group.name][vlan.vid] = vlan.id - if "ssot_synced_to_infoblox" in vlan.custom_field_data: - vlan.custom_field_data.pop("ssot_synced_to_infoblox") + custom_fields = get_valid_custom_fields(vlan.custom_field_data, excluded_cfs=self.excluded_cfs) _vlan = self.vlan( vid=vlan.vid, name=vlan.name, description=vlan.description, vlangroup=vlan.vlan_group.name if vlan.vlan_group else "", status=nautobot_vlan_status(vlan.status.name), - ext_attrs={**default_cfs, **vlan.custom_field_data}, + ext_attrs={**default_cfs, **custom_fields}, pk=vlan.id, ) self.add(_vlan) def load(self): """Load models with data from Nautobot.""" + include_ipv4 = self.config.import_ipv4 + include_ipv6 = self.config.import_ipv6 + sync_filters = self.config.infoblox_sync_filters + self.relationship_map = {r.label: r.id for r in Relationship.objects.only("id", "label")} self.status_map = {s.name: s.id for s in Status.objects.only("id", "name")} self.location_map = {loc.name: loc.id for loc in Location.objects.only("id", "name")} self.tenant_map = {t.name: t.id for t in Tenant.objects.only("id", "name")} self.role_map = {r.name: r.id for r in Role.objects.only("id", "name")} - self.load_prefixes() + self.load_namespaces(sync_filters=sync_filters) 
+ if "namespace" in self.dict(): + self.job.logger.info(f"Loaded {len(self.dict()['namespace'])} Namespaces from Nautobot.") + if self.config.import_subnets: + self.load_prefixes(sync_filters=sync_filters, include_ipv4=include_ipv4, include_ipv6=include_ipv6) if "prefix" in self.dict(): self.job.logger.info(f"Loaded {len(self.dict()['prefix'])} prefixes from Nautobot.") - self.load_ipaddresses() + if self.config.import_ip_addresses: + self.load_ipaddresses(sync_filters=sync_filters, include_ipv4=include_ipv4, include_ipv6=include_ipv6) if "ipaddress" in self.dict(): self.job.logger.info(f"Loaded {len(self.dict()['ipaddress'])} IP addresses from Nautobot.") - self.load_vlangroups() + if self.config.import_vlan_views: + self.load_vlangroups() if "vlangroup" in self.dict(): self.job.logger.info(f"Loaded {len(self.dict()['vlangroup'])} VLAN Groups from Nautobot.") - self.load_vlans() + if self.config.import_vlans: + self.load_vlans() if "vlan" in self.dict(): self.job.logger.info(f"Loaded {len(self.dict()['vlan'])} VLANs from Nautobot.") diff --git a/nautobot_ssot/integrations/infoblox/diffsync/models/__init__.py b/nautobot_ssot/integrations/infoblox/diffsync/models/__init__.py index 15cdf49df..7bfda1de1 100644 --- a/nautobot_ssot/integrations/infoblox/diffsync/models/__init__.py +++ b/nautobot_ssot/integrations/infoblox/diffsync/models/__init__.py @@ -1,14 +1,39 @@ """Initialize models for Nautobot and Infoblox.""" -from .nautobot import NautobotNetwork, NautobotIPAddress, NautobotVlanGroup, NautobotVlan -from .infoblox import InfobloxNetwork, InfobloxIPAddress, InfobloxVLANView, InfobloxVLAN - +from .infoblox import ( + InfobloxDnsARecord, + InfobloxDnsHostRecord, + InfobloxDnsPTRRecord, + InfobloxIPAddress, + InfobloxNamespace, + InfobloxNetwork, + InfobloxVLAN, + InfobloxVLANView, +) +from .nautobot import ( + NautobotDnsARecord, + NautobotDnsHostRecord, + NautobotDnsPTRRecord, + NautobotIPAddress, + NautobotNamespace, + NautobotNetwork, + NautobotVlan, + 
NautobotVlanGroup, +) __all__ = [ + "NautobotDnsARecord", + "NautobotDnsHostRecord", + "NautobotDnsPTRRecord", + "NautobotNamespace", "NautobotNetwork", "NautobotIPAddress", "NautobotVlanGroup", "NautobotVlan", + "InfobloxDnsARecord", + "InfobloxDnsHostRecord", + "InfobloxDnsPTRRecord", + "InfobloxNamespace", "InfobloxNetwork", "InfobloxIPAddress", "InfobloxVLANView", diff --git a/nautobot_ssot/integrations/infoblox/diffsync/models/base.py b/nautobot_ssot/integrations/infoblox/diffsync/models/base.py index 1d3f42cc1..628580399 100644 --- a/nautobot_ssot/integrations/infoblox/diffsync/models/base.py +++ b/nautobot_ssot/integrations/infoblox/diffsync/models/base.py @@ -2,17 +2,31 @@ import uuid from typing import Optional + from diffsync import DiffSyncModel +class Namespace(DiffSyncModel): + """Namespace model for DiffSync.""" + + _modelname = "namespace" + _identifiers = ("name",) + _attributes = ("ext_attrs",) + + name: str + ext_attrs: Optional[dict] + pk: Optional[uuid.UUID] = None + + class Network(DiffSyncModel): """Network model for DiffSync.""" _modelname = "prefix" - _identifiers = ("network",) + _identifiers = ("network", "namespace") _attributes = ("description", "network_type", "ext_attrs", "vlans", "ranges") network: str + namespace: str description: Optional[str] network_type: Optional[str] ext_attrs: Optional[dict] @@ -54,15 +68,116 @@ class IPAddress(DiffSyncModel): """IPAddress model for DiffSync.""" _modelname = "ipaddress" - _identifiers = ("address", "prefix", "prefix_length") - _attributes = ("description", "dns_name", "status", "ip_addr_type", "ext_attrs") + _identifiers = ("address", "prefix", "prefix_length", "namespace") + _attributes = ( + "description", + "status", + "ip_addr_type", + "ext_attrs", + "has_host_record", + "has_a_record", + "has_ptr_record", + "has_fixed_address", + "mac_address", + "fixed_address_comment", + ) address: str - dns_name: str prefix: str prefix_length: int + namespace: str status: Optional[str] ip_addr_type: 
Optional[str] description: Optional[str] ext_attrs: Optional[dict] + has_a_record: bool = False + has_host_record: bool = False + has_ptr_record: bool = False + has_fixed_address: bool = False + mac_address: Optional[str] + fixed_address_comment: Optional[str] + + pk: Optional[uuid.UUID] = None + fixed_address_ref: Optional[str] = None + fixed_address_type: Optional[str] = None + + +class DnsARecord(DiffSyncModel): + """DnsARecord model for DiffSync.""" + + _modelname = "dnsarecord" + _identifiers = ("address", "prefix", "prefix_length", "namespace") + _attributes = ( + "dns_name", + "ip_addr_type", + "description", + "status", + "ext_attrs", + ) + + address: str + prefix: str + prefix_length: int + namespace: str + dns_name: str + ip_addr_type: str + description: Optional[str] + status: Optional[str] + ext_attrs: Optional[dict] + + pk: Optional[uuid.UUID] = None + ref: Optional[str] = None + + +class DnsHostRecord(DiffSyncModel): + """DnsHostRecord model for DiffSync.""" + + _modelname = "dnshostrecord" + _identifiers = ("address", "prefix", "prefix_length", "namespace") + _attributes = ( + "dns_name", + "ip_addr_type", + "description", + "status", + "ext_attrs", + ) + + address: str + prefix: str + prefix_length: int + namespace: str + dns_name: str + ip_addr_type: str + description: Optional[str] + status: Optional[str] + ext_attrs: Optional[dict] + + pk: Optional[uuid.UUID] = None + ref: Optional[str] = None + + +class DnsPTRRecord(DiffSyncModel): + """DnsPTRRecord model for DiffSync.""" + + _modelname = "dnsptrrecord" + _identifiers = ("address", "prefix", "prefix_length", "namespace") + _attributes = ( + "dns_name", + "ip_addr_type", + "description", + "status", + "ext_attrs", + ) + + address: str + prefix: str + prefix_length: int + namespace: str + dns_name: str + ip_addr_type: str + description: Optional[str] + status: Optional[str] + ext_attrs: Optional[dict] + pk: Optional[uuid.UUID] = None + ref: Optional[str] = None diff --git 
a/nautobot_ssot/integrations/infoblox/diffsync/models/infoblox.py b/nautobot_ssot/integrations/infoblox/diffsync/models/infoblox.py index 1613488cc..8c5523ffb 100644 --- a/nautobot_ssot/integrations/infoblox/diffsync/models/infoblox.py +++ b/nautobot_ssot/integrations/infoblox/diffsync/models/infoblox.py @@ -1,7 +1,23 @@ """Infoblox Models for Infoblox integration with SSoT app.""" from requests.exceptions import HTTPError -from nautobot_ssot.integrations.infoblox.diffsync.models.base import Network, IPAddress, Vlan, VlanView + +from nautobot_ssot.integrations.infoblox.choices import ( + DNSRecordTypeChoices, + FixedAddressTypeChoices, + InfobloxDeletableModelChoices, +) +from nautobot_ssot.integrations.infoblox.diffsync.models.base import ( + DnsARecord, + DnsHostRecord, + DnsPTRRecord, + IPAddress, + Namespace, + Network, + Vlan, + VlanView, +) +from nautobot_ssot.integrations.infoblox.utils.diffsync import map_network_view_to_namespace, validate_dns_name class InfobloxNetwork(Network): @@ -10,15 +26,20 @@ class InfobloxNetwork(Network): @classmethod def create(cls, diffsync, ids, attrs): """Create Network object in Infoblox.""" - status = attrs.get("status") + network_type = attrs.get("network_type") network = ids["network"] + network_view = map_network_view_to_namespace(value=ids["namespace"], direction="ns_to_nv") try: - if status != "container": - diffsync.conn.create_network(prefix=network, comment=attrs.get("description", "")) + if network_type != "container": + diffsync.conn.create_network( + prefix=network, comment=attrs.get("description", ""), network_view=network_view + ) else: - diffsync.conn.create_network_container(prefix=network, comment=attrs.get("description", "")) + diffsync.conn.create_network_container( + prefix=network, comment=attrs.get("description", ""), network_view=network_view + ) except HTTPError as err: - diffsync.job.logger.warning(f"Failed to create {ids['network']} due to {err.response.text}") + diffsync.job.logger.warning(f"Failed 
to create {network}-{network_view} due to {err.response.text}") dhcp_ranges = attrs.get("ranges") if dhcp_ranges: for dhcp_range in dhcp_ranges: @@ -28,79 +49,496 @@ def create(cls, diffsync, ids, attrs): prefix=network, start=start.strip(), end=end.strip(), + network_view=network_view, ) except HTTPError as err: - diffsync.job.logger.warning(f"Failed to create {dhcp_range} due to {err.response.text}") + diffsync.job.logger.warning( + f"Failed to create {dhcp_range}-{network_view} due to {err.response.text}" + ) return super().create(ids=ids, diffsync=diffsync, attrs=attrs) def update(self, attrs): """Update Network object in Infoblox.""" + network_view = map_network_view_to_namespace(value=self.get_identifiers()["namespace"], direction="ns_to_nv") self.diffsync.conn.update_network( - prefix=self.get_identifiers()["network"], comment=attrs.get("description", "") + prefix=self.get_identifiers()["network"], + network_view=network_view, + comment=attrs.get("description", ""), ) if attrs.get("ranges"): self.diffsync.job.logger.warning( - f"Prefix, {self.network}, has a change of Ranges in Nautobot, but" - "updating InfoBlox with Ranges is currently not supported." + f"Prefix, {self.network}-{self.namespace}, has a change of Ranges in Nautobot, but" + " updating Ranges in InfoBlox is currently not supported." 
) return super().update(attrs) - # def delete(self): - # """Delete Network object in Infoblox.""" - # self.diffsync.conn.delete_network(self.get_identifiers()["network"]) - # return super().delete() - class InfobloxVLANView(VlanView): """Infoblox implementation of the VLANView Model.""" - # @classmethod - # def create(cls, diffsync, ids, attrs): - # """Create VLANView object in Infoblox.""" - # diffsync.conn.create_vlan( - # vlan_id=ids["vid"], - # vlan_name=attrs["vlan_name"], - # vlan_view=attrs["vlangroup"] if attrs.get("vlangroup") else "nautobot", - # ) - # return super().create(ids=ids, diffsync=diffsync, attrs=attrs) - class InfobloxVLAN(Vlan): """Infoblox implementation of the VLAN Model.""" -# @classmethod -# def create(cls, diffsync, ids, attrs): -# """Create VLAN object in Infoblox.""" -# diffsync.conn.create_vlan_view(name=ids.name) -# return super().create(ids=ids, diffsync=diffsync, attrs=attrs) - - class InfobloxIPAddress(IPAddress): """Infoblox implementation of the VLAN Model.""" @classmethod def create(cls, diffsync, ids, attrs): - """Create either a host record or fixed address (Not implemented). 
+ """Creates Fixed Address record.""" + network_view = map_network_view_to_namespace(value=ids["namespace"], direction="ns_to_nv") + ip_address = ids["address"] + mac_address = attrs.get("mac_address") + has_fixed_address = attrs.get("has_fixed_address", False) + fixed_address_name = attrs.get("description") or "" + fixed_address_comment = attrs.get("fixed_address_comment") or "" + + if diffsync.config.fixed_address_type == FixedAddressTypeChoices.RESERVED and has_fixed_address: + diffsync.conn.create_fixed_address( + ip_address=ip_address, + name=fixed_address_name, + comment=fixed_address_comment, + match_client="RESERVED", + network_view=network_view, + ) + if diffsync.job.debug: + diffsync.job.logger.debug( + "Created fixed address reservation, address: %s, name: %s, network_view: %s, comment: %s", + ip_address, + fixed_address_name, + network_view, + fixed_address_comment, + ) + elif ( + diffsync.config.fixed_address_type == FixedAddressTypeChoices.MAC_ADDRESS + and mac_address + and has_fixed_address + ): + diffsync.conn.create_fixed_address( + ip_address=ip_address, + name=fixed_address_name, + mac_address=mac_address, + match_client="MAC_ADDRESS", + comment=fixed_address_comment, + network_view=network_view, + ) + if diffsync.job.debug: + diffsync.job.logger.debug( + "Created fixed address with MAC, address: %s, name: %s, mac address: %s, network_view: %s, comment: %s", + ip_address, + fixed_address_name, + mac_address, + network_view, + fixed_address_comment, + ) - This requires the IP Address to either have a DNS name - """ - if attrs["dns_name"]: - diffsync.conn.create_host_record(attrs["dns_name"], ids["address"]) return super().create(ids=ids, diffsync=diffsync, attrs=attrs) - def update(self, attrs): + def update(self, attrs): # pylint: disable=too-many-branches """Update IP Address object in Infoblox.""" - json = {"configure_for_dns": False} - if attrs.get("description"): - json.update({"comment": attrs["description"]}) + ids = self.get_identifiers() 
+ inf_attrs = self.get_attrs() + ip_address = ids["address"] + network_view = map_network_view_to_namespace(value=ids["namespace"], direction="ns_to_nv") + + mac_address = attrs.get("mac_address") + fixed_address_name = attrs.get("description") or "" + fixed_address_comment = attrs.get("fixed_address_comment") or "" + + # Attempt update of a fixed address if Infoblox has one already + if inf_attrs.get("has_fixed_address"): + fa_update_data = {} + if "description" in attrs: + fa_update_data["name"] = fixed_address_name + if "fixed_address_comment" in attrs: + fa_update_data["comment"] = fixed_address_comment + + if ( + self.diffsync.config.fixed_address_type == FixedAddressTypeChoices.RESERVED + and self.fixed_address_type == "RESERVED" + and fa_update_data + ): + self.diffsync.conn.update_fixed_address(ref=self.fixed_address_ref, data=fa_update_data) + if self.diffsync.job.debug: + self.diffsync.job.logger.debug( + "Updated fixed address reservation, address: %s, network_view: %s, update data: %s", + ip_address, + network_view, + fa_update_data, + extra={"grouping": "update"}, + ) + elif ( + self.diffsync.config.fixed_address_type == FixedAddressTypeChoices.MAC_ADDRESS + and self.fixed_address_type == "MAC_ADDRESS" + and (fa_update_data or mac_address) + ): + if mac_address: + fa_update_data["mac"] = mac_address + self.diffsync.conn.update_fixed_address(ref=self.fixed_address_ref, data=fa_update_data) + if self.diffsync.job.debug: + self.diffsync.job.logger.debug( + "Updated fixed address with MAC, address: %s, network_view: %s, update data: %s", + ip_address, + network_view, + fa_update_data, + extra={"grouping": "update"}, + ) + # IP Address exists in Infoblox without Fixed Address object. Nautobot side is asking for Fixed Address so we need to create one. 
+ elif ( + attrs.get("has_fixed_address") + and self.diffsync.config.fixed_address_type != FixedAddressTypeChoices.DONT_CREATE_RECORD + ): + if self.diffsync.config.fixed_address_type == FixedAddressTypeChoices.RESERVED: + self.diffsync.conn.create_fixed_address( + ip_address=ip_address, + name=fixed_address_name, + comment=fixed_address_comment, + match_client="RESERVED", + network_view=network_view, + ) + if self.diffsync.job.debug: + self.diffsync.job.logger.debug( + "Created fixed address reservation, address: %s, name: %s, network_view: %s, comment: %s", + ip_address, + fixed_address_name, + network_view, + fixed_address_comment, + extra={"grouping": "update"}, + ) + elif self.diffsync.config.fixed_address_type == FixedAddressTypeChoices.MAC_ADDRESS and mac_address: + self.diffsync.conn.create_fixed_address( + ip_address=ip_address, + name=fixed_address_name, + mac_address=mac_address, + comment=fixed_address_comment, + match_client="MAC_ADDRESS", + network_view=network_view, + ) + if self.diffsync.job.debug: + self.diffsync.job.logger.debug( + "Created fixed address with MAC, address: %s, name: %s, mac address: %s, network_view: %s, comment: %s", + ip_address, + fixed_address_name, + mac_address, + network_view, + fixed_address_comment, + extra={"grouping": "update"}, + ) + + return super().update(attrs) + + def delete(self): + """Delete Fixed Address in Infoblox.""" + if InfobloxDeletableModelChoices.FIXED_ADDRESS not in self.diffsync.config.infoblox_deletable_models: + return super().delete() + + if self.diffsync.config.fixed_address_type == FixedAddressTypeChoices.DONT_CREATE_RECORD: + return super().delete() + + network_view = map_network_view_to_namespace(value=self.namespace, direction="ns_to_nv") + self.diffsync.conn.delete_fixed_address_record_by_ref(self.fixed_address_ref) + self.diffsync.job.logger.info( + "Deleted Fixed Address record in Infoblox, address: %s, network_view: %s", + self.address, + network_view, + ) + return super().delete() + + 
+class InfobloxNamespace(Namespace): + """Infoblox implementation of the Namespace model.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Don't allow creating Network Views in Infoblox.""" + diffsync.job.logger.error( + f"Creating Network Views in Infoblox is not allowed. Nautobot Namespace: {ids['name']}" + ) + raise NotImplementedError + + def update(self, attrs): + """Don't allow updating Network Views in Infoblox.""" + self.diffsync.job.logger.error( + f"Updating Network Views in Infoblox is not allowed. Nautobot Namespace: {self.get_identifiers()['name']}" + ) + raise NotImplementedError + + def delete(self): + """Don't allow deleting Network Views in Infoblox.""" + self.diffsync.job.logger.error( + f"Deleting Network Views in Infoblox is not allowed. Nautobot Namespace: {self.get_identifiers()['name']}" + ) + raise NotImplementedError + + +class InfobloxDnsARecord(DnsARecord): + """Infoblox implementation of the DnsARecord Model.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create DNS A record in Infoblox.""" + # DNS record not needed, we can return + if diffsync.config.dns_record_type not in ( + DNSRecordTypeChoices.A_RECORD, + DNSRecordTypeChoices.A_AND_PTR_RECORD, + ): + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + network_view = map_network_view_to_namespace(value=ids["namespace"], direction="ns_to_nv") + ip_address = ids["address"] + dns_name = attrs.get("dns_name") + dns_comment = attrs.get("description") + if not dns_name: + diffsync.job.logger.warning( + f"Cannot create Infoblox DNS A record for IP Address {ip_address}. DNS name is not defined." + ) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + # Nautobot side doesn't check if dns name is a FQDN. Additionally, Infoblox won't accept DNS name if the corresponding zone FQDN doesn't exist. 
+ if not validate_dns_name(diffsync.conn, dns_name, network_view): + diffsync.job.logger.warning(f"Invalid zone fqdn in DNS name `{dns_name}` for IP Address {ip_address}.") + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + diffsync.conn.create_a_record(dns_name, ip_address, dns_comment, network_view=network_view) + if diffsync.job.debug: + diffsync.job.logger.debug( + "Created DNS A record, address: %s, dns_name: %s, network_view: %s, comment: %s", + ip_address, + dns_name, + network_view, + dns_comment, + ) + + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + def update(self, attrs): + """Update DNS A record in Infoblox.""" + # DNS record not needed, we can return + if self.diffsync.config.dns_record_type not in ( + DNSRecordTypeChoices.A_RECORD, + DNSRecordTypeChoices.A_AND_PTR_RECORD, + ): + return super().update(attrs) + + network_view = map_network_view_to_namespace(value=self.namespace, direction="ns_to_nv") + dns_payload = {} + dns_comment = attrs.get("description") + if dns_comment: + dns_payload["comment"] = dns_comment if attrs.get("dns_name"): - json.update({"name": attrs["dns_name"]}) - if json: - self.diffsync.conn.update_ipaddress(ip_address=self.get_identifiers()["address"], data=json) + # Nautobot side doesn't check if dns name is a FQDN. Additionally, Infoblox won't accept DNS name if the corresponding zone FQDN doesn't exist. + if not validate_dns_name(self.diffsync.conn, attrs.get("dns_name"), network_view): + self.diffsync.job.logger.warning( + f"Invalid zone fqdn in DNS name `{attrs.get('dns_name')}` for IP Address {self.address}." 
+ ) + return super().update(attrs) + + dns_payload["name"] = attrs.get("dns_name") + + if dns_payload: + self.diffsync.conn.update_a_record(ref=self.ref, data=dns_payload) + if self.diffsync.job.debug: + self.diffsync.job.logger.debug( + "Updated A record, address: %s, network_view: %s, update data: %s", + self.address, + network_view, + dns_payload, + ) + return super().update(attrs) - # def delete(self): - # """Delete an IP Address from Infoblox.""" - # self.diffsync.conn.delete_host_record(self.get_identifiers()["address"]) - # return super().delete() + def delete(self): + """Delete A Record in Infoblox.""" + if InfobloxDeletableModelChoices.DNS_A_RECORD not in self.diffsync.config.infoblox_deletable_models: + return super().delete() + + if self.diffsync.config.dns_record_type not in ( + DNSRecordTypeChoices.A_RECORD, + DNSRecordTypeChoices.A_AND_PTR_RECORD, + ): + return super().delete() + + network_view = map_network_view_to_namespace(value=self.namespace, direction="ns_to_nv") + self.diffsync.conn.delete_a_record_by_ref(self.ref) + self.diffsync.job.logger.info( + "Deleted A record in Infoblox, address: %s, network_view: %s", + self.address, + network_view, + ) + return super().delete() + + +class InfobloxDnsHostRecord(DnsHostRecord): + """Infoblox implementation of the DnsHostRecord Model.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create DNS Host record in Infoblox.""" + # DNS record not needed, we can return + if diffsync.config.dns_record_type != DNSRecordTypeChoices.HOST_RECORD: + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + network_view = map_network_view_to_namespace(value=ids["namespace"], direction="ns_to_nv") + ip_address = ids["address"] + dns_name = attrs.get("dns_name") + dns_comment = attrs.get("description") + if not dns_name: + diffsync.job.logger.warning( + f"Cannot create Infoblox DNS Host record for IP Address {ip_address}. DNS name is not defined." 
+ ) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + # Nautobot side doesn't check if dns name is a FQDN. Additionally, Infoblox won't accept DNS name if the corresponding zone FQDN doesn't exist. + if not validate_dns_name(diffsync.conn, dns_name, network_view): + diffsync.job.logger.warning(f"Invalid zone fqdn in DNS name `{dns_name}` for IP Address {ip_address}.") + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + diffsync.conn.create_host_record(dns_name, ip_address, dns_comment, network_view=network_view) + if diffsync.job.debug: + diffsync.job.logger.debug( + "Created DNS Host record, address: %s, dns_name: %s, network_view: %s, comment: %s", + ip_address, + dns_name, + network_view, + dns_comment, + ) + + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + def update(self, attrs): + """Update DNS Host record in Infoblox.""" + # DNS record not needed, we can return + if self.diffsync.config.dns_record_type != DNSRecordTypeChoices.HOST_RECORD: + return super().update(attrs) + + network_view = map_network_view_to_namespace(value=self.namespace, direction="ns_to_nv") + dns_payload = {} + dns_comment = attrs.get("description") + if dns_comment: + dns_payload["comment"] = dns_comment + if attrs.get("dns_name"): + # Nautobot side doesn't check if dns name is a FQDN. Additionally, Infoblox won't accept DNS name if the corresponding zone FQDN doesn't exist. + if not validate_dns_name(self.diffsync.conn, attrs.get("dns_name"), network_view): + self.diffsync.job.logger.warning( + f"Invalid zone fqdn in DNS name `{attrs.get('dns_name')}` for IP Address {self.address}." 
+ ) + return super().update(attrs) + + dns_payload["name"] = attrs.get("dns_name") + + if dns_payload: + self.diffsync.conn.update_host_record(ref=self.ref, data=dns_payload) + if self.diffsync.job.debug: + self.diffsync.job.logger.debug( + "Updated Host record, address: %s, network_view: %s, update data: %s", + self.address, + network_view, + dns_payload, + ) + + return super().update(attrs) + + def delete(self): + """Delete DNS Host record in Infoblox.""" + if InfobloxDeletableModelChoices.DNS_HOST_RECORD not in self.diffsync.config.infoblox_deletable_models: + return super().delete() + + if self.diffsync.config.dns_record_type != DNSRecordTypeChoices.HOST_RECORD: + return super().delete() + + network_view = map_network_view_to_namespace(value=self.namespace, direction="ns_to_nv") + self.diffsync.conn.delete_host_record_by_ref(self.ref) + self.diffsync.job.logger.info( + "Deleted Host record in Infoblox, address: %s, network_view: %s", + self.address, + network_view, + ) + return super().delete() + + +class InfobloxDnsPTRRecord(DnsPTRRecord): + """Infoblox implementation of the DnsPTRRecord Model.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create PTR record in Infoblox.""" + # DNS record not needed, we can return + if diffsync.config.dns_record_type != DNSRecordTypeChoices.A_AND_PTR_RECORD: + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + network_view = map_network_view_to_namespace(value=ids["namespace"], direction="ns_to_nv") + ip_address = ids["address"] + dns_name = attrs.get("dns_name") + dns_comment = attrs.get("description") + if not dns_name: + diffsync.job.logger.warning( + f"Cannot create Infoblox PTR DNS record for IP Address {ip_address}. DNS name is not defined." + ) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + # Nautobot side doesn't check if dns name is a FQDN. Additionally, Infoblox won't accept DNS name if the corresponding zone FQDN doesn't exist. 
+ if not validate_dns_name(diffsync.conn, dns_name, network_view): + diffsync.job.logger.warning(f"Invalid zone fqdn in DNS name `{dns_name}` for IP Address {ip_address}.") + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + diffsync.conn.create_ptr_record(dns_name, ip_address, dns_comment, network_view=network_view) + if diffsync.job.debug: + diffsync.job.logger.debug( + "Created DNS PTR record, address: %s, dns_name: %s, network_view: %s, comment: %s", + ip_address, + dns_name, + network_view, + dns_comment, + ) + + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + def update(self, attrs): + """Update PTR record in Infoblox.""" + if not self.diffsync.config.dns_record_type == DNSRecordTypeChoices.A_AND_PTR_RECORD: + return super().update(attrs) + + network_view = map_network_view_to_namespace(value=self.namespace, direction="ns_to_nv") + dns_payload = {} + dns_comment = attrs.get("description") + if dns_comment: + dns_payload["comment"] = dns_comment + if attrs.get("dns_name"): + # Nautobot side doesn't check if dns name is a FQDN. Additionally, Infoblox won't accept DNS name if the corresponding zone FQDN doesn't exist. + if not validate_dns_name(self.diffsync.conn, attrs.get("dns_name"), network_view): + self.diffsync.job.logger.warning( + f"Invalid zone fqdn in DNS name `{attrs.get('dns_name')}` for IP Address {self.address}." 
+ ) + return super().update(attrs) + + dns_payload["ptrdname"] = attrs.get("dns_name") + + if dns_payload: + self.diffsync.conn.update_ptr_record(ref=self.ref, data=dns_payload) + if self.diffsync.job.debug: + self.diffsync.job.logger.debug( + "Updated PTR record, address: %s, network_view: %s, update data: %s", + self.address, + network_view, + dns_payload, + ) + + return super().update(attrs) + + def delete(self): + """Delete PTR Record in Infoblox.""" + if InfobloxDeletableModelChoices.DNS_PTR_RECORD not in self.diffsync.config.infoblox_deletable_models: + return super().delete() + + if not self.diffsync.config.dns_record_type == DNSRecordTypeChoices.A_AND_PTR_RECORD: + return super().delete() + + network_view = map_network_view_to_namespace(value=self.namespace, direction="ns_to_nv") + self.diffsync.conn.delete_ptr_record_by_ref(self.ref) + self.diffsync.job.logger.info( + "Deleted PTR record in Infoblox, address: %s, network_view: %s", + self.address, + network_view, + ) + return super().delete() diff --git a/nautobot_ssot/integrations/infoblox/diffsync/models/nautobot.py b/nautobot_ssot/integrations/infoblox/diffsync/models/nautobot.py index adb57fe5d..907901c3d 100644 --- a/nautobot_ssot/integrations/infoblox/diffsync/models/nautobot.py +++ b/nautobot_ssot/integrations/infoblox/diffsync/models/nautobot.py @@ -4,16 +4,34 @@ from django.core.exceptions import ValidationError from django.utils.text import slugify from nautobot.extras.choices import CustomFieldTypeChoices -from nautobot.extras.models import RelationshipAssociation as OrmRelationshipAssociation from nautobot.extras.models import CustomField as OrmCF +from nautobot.extras.models import RelationshipAssociation as OrmRelationshipAssociation from nautobot.ipam.choices import IPAddressRoleChoices, IPAddressTypeChoices +from nautobot.ipam.models import VLAN as OrmVlan from nautobot.ipam.models import IPAddress as OrmIPAddress +from nautobot.ipam.models import Namespace as OrmNamespace from 
nautobot.ipam.models import Prefix as OrmPrefix -from nautobot.ipam.models import VLAN as OrmVlan from nautobot.ipam.models import VLANGroup as OrmVlanGroup -from nautobot_ssot.integrations.infoblox.constant import PLUGIN_CFG -from nautobot_ssot.integrations.infoblox.diffsync.models.base import Network, IPAddress, Vlan, VlanView -from nautobot_ssot.integrations.infoblox.utils.diffsync import create_tag_sync_from_infoblox + +from nautobot_ssot.integrations.infoblox.choices import ( + DNSRecordTypeChoices, + FixedAddressTypeChoices, + NautobotDeletableModelChoices, +) +from nautobot_ssot.integrations.infoblox.diffsync.models.base import ( + DnsARecord, + DnsHostRecord, + DnsPTRRecord, + IPAddress, + Namespace, + Network, + Vlan, + VlanView, +) +from nautobot_ssot.integrations.infoblox.utils.diffsync import ( + create_tag_sync_from_infoblox, + map_network_view_to_namespace, +) from nautobot_ssot.integrations.infoblox.utils.nautobot import get_prefix_vlans @@ -95,22 +113,79 @@ def process_ext_attrs(diffsync, obj: object, extattrs: dict): # pylint: disable obj.custom_field_data.update({_cf_dict["key"]: str(attr_value)}) +def _create_ip_address_common(diffsync: object, ids: dict, attrs: dict) -> IPAddress: + """Creates common IP Address atrributes. 
+ + Args: + diffsync (object): diffsync adapter instance + ids (dict): IP Address identifiers + attrs (dict): IP Address attributes + + Returns: + Partially instantiated IPAddress object + """ + try: + status = diffsync.status_map[attrs["status"]] + except KeyError: + status = diffsync.config.default_status.pk + addr = f"{ids['address']}/{ids['prefix_length']}" + if attrs.get("ip_addr_type"): + if attrs["ip_addr_type"].lower() in IPAddressTypeChoices.as_dict(): + ip_addr_type = attrs["ip_addr_type"].lower() + else: + diffsync.logger.warning( + f"unable to determine IPAddress Type for {addr}, defaulting to 'Host'", + extra={"grouping": "create"}, + ) + ip_addr_type = "host" + else: + ip_addr_type = "host" + _ip = OrmIPAddress( + address=addr, + status_id=status, + type=ip_addr_type, + parent_id=diffsync.prefix_map[(ids["namespace"], ids["prefix"])], + ) + if attrs.get("ext_attrs"): + process_ext_attrs(diffsync=diffsync, obj=_ip, extattrs=attrs["ext_attrs"]) + _ip.tags.add(create_tag_sync_from_infoblox()) + + return _ip + + +def _get_ip_address_ds_key(address: object) -> tuple: + """Get IP Address key used to find out PK of the IP Address objects. 
+ + Args: + address (object): Diffsync IPAddress object + + Returns: + tuple containing key to the dict + """ + ip_address_key = (f"{address.address}/{address.prefix_length}", address.namespace) + + return ip_address_key + + class NautobotNetwork(Network): """Nautobot implementation of the Network Model.""" @classmethod def create(cls, diffsync, ids, attrs): """Create Prefix object in Nautobot.""" + namespace_name = map_network_view_to_namespace(value=ids["namespace"], direction="nv_to_ns") _prefix = OrmPrefix( prefix=ids["network"], status_id=diffsync.status_map["Active"], type=attrs["network_type"], description=attrs.get("description", ""), + namespace_id=diffsync.namespace_map[namespace_name], ) prefix_ranges = attrs.get("ranges") if prefix_ranges: _prefix.cf["dhcp_ranges"] = ",".join(prefix_ranges) - if attrs.get("vlans"): + # Only attempt associating to VLANs if they were actually loaded + if attrs.get("vlans") and diffsync.vlan_map: relation = diffsync.relationship_map["Prefix -> VLAN"] for _, _vlan in attrs["vlans"].items(): index = 0 @@ -136,7 +211,7 @@ def create(cls, diffsync, ids, attrs): process_ext_attrs(diffsync=diffsync, obj=_prefix, extattrs=attrs["ext_attrs"]) _prefix.tags.add(create_tag_sync_from_infoblox()) _prefix.validated_save() - diffsync.prefix_map[ids["network"]] = _prefix.id + diffsync.prefix_map[(ids["namespace"], ids["network"])] = _prefix.id return super().create(ids=ids, diffsync=diffsync, attrs=attrs) def update(self, attrs): # pylint: disable=too-many-branches @@ -153,7 +228,8 @@ def update(self, attrs): # pylint: disable=too-many-branches prefix_ranges = attrs.get("ranges") if prefix_ranges: _pf.cf["dhcp_ranges"] = ",".join(prefix_ranges) - if "vlans" in attrs: # pylint: disable=too-many-nested-blocks + # Only attempt associating to VLANs if they were actually loaded + if "vlans" in attrs and self.diffsync.vlan_map: # pylint: disable=too-many-nested-blocks current_vlans = get_prefix_vlans(prefix=_pf) if len(current_vlans) < 
len(attrs["vlans"]): for _, item in attrs["vlans"].items(): @@ -191,62 +267,70 @@ def update(self, attrs): # pylint: disable=too-many-branches _pf.validated_save() return super().update(attrs) - # def delete(self): - # """Delete Prefix object in Nautobot.""" - # self.diffsync.job.logger.warning(f"Prefix {self.network} will be deleted.") - # _prefix = OrmPrefix.objects.get(id=self.pk) - # _prefix.delete() - # return super().delete() - class NautobotIPAddress(IPAddress): """Nautobot implementation of the IPAddress Model.""" @classmethod def create(cls, diffsync, ids, attrs): - """Create IPAddress object in Nautobot.""" - try: - status = diffsync.status_map[attrs["status"]] - except KeyError: - status = diffsync.status_map[PLUGIN_CFG.get("default_status", "Active")] - addr = f"{ids['address']}/{ids['prefix_length']}" - if attrs.get("ip_addr_type"): - if attrs["ip_addr_type"].lower() in IPAddressTypeChoices.as_dict(): - ip_addr_type = attrs["ip_addr_type"].lower() - else: - diffsync.logger.warning(f"unable to determine IPAddress Type for {addr}, defaulting to 'Host'") - ip_addr_type = "host" + """Create IPAddress object in Nautobot. Used for fixed address data only.""" + # Infoblox side doesn't have a fixed address record + if not attrs.get("has_fixed_address", False): + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + addr_w_pfxl = f"{ids['address']}/{ids['prefix_length']}" + if diffsync.config.fixed_address_type == FixedAddressTypeChoices.DONT_CREATE_RECORD: + diffsync.job.logger.warning( + f"Did not create Fixed Address {addr_w_pfxl}-{ids['namespace']}. It exists in Infoblox but Nautobot config has `fixed_address_type` set to `DONT_CREATE_RECORD`." 
+ ) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + if diffsync.job.debug: - diffsync.job.logger.debug(f"Creating IP Address {addr}") - _ip = OrmIPAddress( - address=addr, - status_id=status, - type=ip_addr_type, - description=attrs.get("description", ""), - dns_name=attrs.get("dns_name", ""), - parent_id=diffsync.prefix_map[ids["prefix"]], - ) - if attrs.get("ext_attrs"): - process_ext_attrs(diffsync=diffsync, obj=_ip, extattrs=attrs["ext_attrs"]) + diffsync.job.logger.debug(f"Creating IP Address {addr_w_pfxl}") + _ip = _create_ip_address_common(diffsync, ids, attrs) + _ip.description = attrs.get("description") or "" + if "mac_address" in attrs: + _ip.custom_field_data.update({"mac_address": attrs.get("mac_address", "")}) + if "fixed_address_comment" in attrs: + _ip.custom_field_data.update({"fixed_address_comment": attrs.get("fixed_address_comment") or ""}) + try: - _ip.tags.add(create_tag_sync_from_infoblox()) _ip.validated_save() - diffsync.ipaddr_map[_ip.address] = _ip.id + diffsync.ipaddr_map[(f"{addr_w_pfxl}", ids["namespace"])] = _ip.id return super().create(ids=ids, diffsync=diffsync, attrs=attrs) except ValidationError as err: - diffsync.job.logger.warning( - f"Error with validating IP Address {ids['address']}/{ids['prefix_length']}. {err}" - ) + diffsync.job.logger.warning(f"Error with validating IP Address {addr_w_pfxl}-{ids['namespace']}. {err}") return None - def update(self, attrs): + def update(self, attrs): # pylint: disable=too-many-branches """Update IPAddress object in Nautobot.""" + # Description field should only be used by Fixed Address. + # If description is cleared in Infoblox diffsync record it either means fixed address is gone or name was removed. + # Either way we clear the field in Nautobot even if DONT_CREATE_RECORD is set. 
+ if attrs.get("description") == "" and FixedAddressTypeChoices.DONT_CREATE_RECORD: + _ipaddr = OrmIPAddress.objects.get(id=self.pk) + _ipaddr.description = attrs["description"] + _ipaddr.custom_field_data.update({"fixed_address_comment": attrs.get("fixed_address_comment") or ""}) + try: + _ipaddr.validated_save() + return super().update(attrs) + except ValidationError as err: + self.diffsync.job.logger.warning(f"Error with updating IP Address {self.address}. {err}") + return None + + if self.diffsync.config.fixed_address_type == FixedAddressTypeChoices.DONT_CREATE_RECORD: + self.diffsync.job.logger.warning( + f"Did not update Fixed Address {self.address}/{self.prefix_length}-{self.namespace}. " # nosec: B608 + "It exists in Infoblox but Nautobot config has `fixed_address_type` set to `DONT_CREATE_RECORD`." + ) + return super().update(attrs) + _ipaddr = OrmIPAddress.objects.get(id=self.pk) if attrs.get("status"): try: status = self.diffsync.status_map[attrs["status"]] except KeyError: - status = self.diffsync.status_map[PLUGIN_CFG.get("default_status", "Active")] + status = self.diffsync.config.default_status.pk _ipaddr.status_id = status if attrs.get("ip_addr_type"): if attrs["ip_addr_type"].lower() in IPAddressTypeChoices.as_dict(): @@ -255,10 +339,12 @@ def update(self, attrs): _ipaddr.type = "host" if attrs.get("description"): _ipaddr.description = attrs["description"] - if attrs.get("dns_name"): - _ipaddr.dns_name = attrs["dns_name"] if "ext_attrs" in attrs: process_ext_attrs(diffsync=self.diffsync, obj=_ipaddr, extattrs=attrs["ext_attrs"]) + if "mac_address" in attrs: + _ipaddr.custom_field_data.update({"mac_address": attrs.get("mac_address", "")}) + if "fixed_address_comment" in attrs: + _ipaddr.custom_field_data.update({"fixed_address_comment": attrs.get("fixed_address_comment") or ""}) try: _ipaddr.validated_save() return super().update(attrs) @@ -266,12 +352,15 @@ def update(self, attrs): self.diffsync.job.logger.warning(f"Error with updating IP Address 
{self.address}. {err}") return None - # def delete(self): - # """Delete IPAddress object in Nautobot.""" - # self.diffsync.job.logger.warning(f"IP Address {self.address} will be deleted.") - # _ipaddr = OrmIPAddress.objects.get(id=self.pk) - # _ipaddr.delete() - # return super().delete() + def delete(self): + """Delete IPAddress object in Nautobot.""" + if NautobotDeletableModelChoices.IP_ADDRESS not in self.diffsync.config.nautobot_deletable_models: + return super().delete() + + _ipaddr = OrmIPAddress.objects.get(id=self.pk) + del self.diffsync.ipaddr_map[_get_ip_address_ds_key(self)] + _ipaddr.delete() + return super().delete() class NautobotVlanGroup(VlanView): @@ -299,6 +388,9 @@ def update(self, attrs): def delete(self): """Delete VLANGroup object in Nautobot.""" + if NautobotDeletableModelChoices.VLAN_GROUP not in self.diffsync.config.nautobot_deletable_models: + return super().delete() + self.diffsync.job.logger.warning(f"VLAN Group {self.name} will be deleted.") _vg = OrmVlanGroup.objects.get(id=self.pk) _vg.delete() @@ -362,7 +454,339 @@ def update(self, attrs): def delete(self): """Delete VLAN object in Nautobot.""" + if NautobotDeletableModelChoices.VLAN not in self.diffsync.config.nautobot_deletable_models: + return super().delete() + self.diffsync.job.logger.warning(f"VLAN {self.vid} will be deleted.") _vlan = OrmVlan.objects.get(id=self.pk) _vlan.delete() return super().delete() + + +class NautobotNamespace(Namespace): + """Nautobot implementation of the Namespace model.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create Namespace object in Nautobot.""" + _ns = OrmNamespace( + name=ids["name"], + ) + if attrs.get("ext_attrs"): + process_ext_attrs(diffsync=diffsync, obj=_ns, extattrs=attrs["ext_attrs"]) + try: + _ns.tags.add(create_tag_sync_from_infoblox()) + _ns.validated_save() + diffsync.namespace_map[ids["name"]] = _ns.id + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + except ValidationError as err: + 
diffsync.job.logger.warning(f"Unable to create Namespace {_ns.name}. {err}") + return None + + def update(self, attrs): + """Update Namespace object in Nautobot.""" + _ns = OrmNamespace.objects.get(id=self.pk) + if "ext_attrs" in attrs: + process_ext_attrs(diffsync=self.diffsync, obj=_ns, extattrs=attrs["ext_attrs"]) + try: + _ns.validated_save() + return super().update(attrs) + except ValidationError as err: + self.diffsync.job.logger.warning(f"Unable to update Namespace {_ns.name}. {err}") + return None + + def delete(self): + """Don't allow deleting Namespaces in Nautobot.""" + self.diffsync.job.logger.error( + f"Deleting Namespaces in Nautobot is not allowed. Infoblox Network View: {self.get_identifiers()['name']}" + ) + raise NotImplementedError + + +class NautobotDnsARecord(DnsARecord): + """Nautobot implementation of the DnsARecord Model.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create A Record data on IPAddress object in Nautobot.""" + addr_w_pfxl = f"{ids['address']}/{ids['prefix_length']}" + + if diffsync.config.dns_record_type not in ( + DNSRecordTypeChoices.A_RECORD, + DNSRecordTypeChoices.A_AND_PTR_RECORD, + ): + diffsync.job.logger.warning( + f"Can't create/update A record data for IP Address: {addr_w_pfxl}-{ids['namespace']}. Nautobot config is not set for A record operations." # nosec: B608 + ) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + ip_pk = diffsync.ipaddr_map.get((addr_w_pfxl, ids["namespace"])) + if ip_pk: + if diffsync.job.debug: + diffsync.job.logger.debug( + f"Adding A record data to an existing IP Address: {addr_w_pfxl}-{ids['namespace']}." 
+ ) + _ipaddr = OrmIPAddress.objects.get(id=ip_pk) + _ipaddr.dns_name = attrs.get("dns_name") or "" + _ipaddr.custom_field_data.update({"dns_a_record_comment": attrs.get("description") or ""}) + try: + _ipaddr.validated_save() + except ValidationError as err: + diffsync.job.logger.warning( + f"Error with updating A record data for IP Address: {addr_w_pfxl}-{ids['namespace']}. {err}" + ) + return None + else: + if diffsync.job.debug: + diffsync.job.logger.debug(f"Creating IP Address from A record data: {addr_w_pfxl}-{ids['namespace']}.") + try: + _ipaddr = _create_ip_address_common(diffsync, ids, attrs) + _ipaddr.dns_name = attrs.get("dns_name") or "" + _ipaddr.custom_field_data.update({"dns_a_record_comment": attrs.get("description") or ""}) + _ipaddr.validated_save() + diffsync.ipaddr_map[(addr_w_pfxl, ids["namespace"])] = _ipaddr.id + except ValidationError as err: + diffsync.job.logger.warning( + f"Error with creating IP Address from A record data: {addr_w_pfxl}-{ids['namespace']}. {err}" + ) + return None + + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + def update(self, attrs): + """Update A Record data on IPAddress object in Nautobot.""" + if self.diffsync.config.dns_record_type not in ( + DNSRecordTypeChoices.A_RECORD, + DNSRecordTypeChoices.A_AND_PTR_RECORD, + ): + self.diffsync.job.logger.warning( + f"Can't update A record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. Nautobot config is not set for A record operations." 
# nosec: B608 + ) + return super().update(attrs) + + _ipaddr = OrmIPAddress.objects.get(id=self.pk) + if attrs.get("dns_name"): + _ipaddr.dns_name = attrs["dns_name"] + if "description" in attrs: + _ipaddr.custom_field_data.update({"dns_a_record_comment": attrs.get("description") or ""}) + try: + _ipaddr.validated_save() + return super().update(attrs) + except ValidationError as err: + self.diffsync.job.logger.warning( + f"Error with updating A record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. {err}" + ) + return None + + def delete(self): + """Delete A Record data on IPAddress object in Nautobot.""" + if NautobotDeletableModelChoices.DNS_A_RECORD not in self.diffsync.config.nautobot_deletable_models: + return super().delete() + + if self.diffsync.config.dns_record_type not in ( + DNSRecordTypeChoices.A_RECORD, + DNSRecordTypeChoices.A_AND_PTR_RECORD, + ): + self.diffsync.job.logger.warning( + f"Can't delete A record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. Nautobot config is not set for A record operations." + ) + return super().delete() + + # Parent record has been already deleted + if _get_ip_address_ds_key(self) not in self.diffsync.ipaddr_map: + return super().delete() + + _ipaddr = OrmIPAddress.objects.get(id=self.pk) + _ipaddr.dns_name = "" + _ipaddr.custom_field_data.update({"dns_a_record_comment": ""}) + try: + _ipaddr.validated_save() + return super().delete() + except ValidationError as err: + self.diffsync.job.logger.warning( + f"Error with deleting A record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. 
{err}" + ) + return None + + +class NautobotDnsHostRecord(DnsHostRecord): + """Nautobot implementation of the DnsHostRecord Model.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create Host Record data on IPAddress object in Nautobot.""" + addr_w_pfxl = f"{ids['address']}/{ids['prefix_length']}" + + if diffsync.config.dns_record_type != DNSRecordTypeChoices.HOST_RECORD: + diffsync.job.logger.warning( + f"Can't create/update Host record data for IP Address: {addr_w_pfxl}-{ids['namespace']}. Nautobot config is not set for Host record operations." # nosec: B608 + ) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + ip_pk = diffsync.ipaddr_map.get((addr_w_pfxl, ids["namespace"])) + if ip_pk: + if diffsync.job.debug: + diffsync.job.logger.debug( + f"Adding Host record data to an existing IP Address: {addr_w_pfxl}-{ids['namespace']}." + ) + _ipaddr = OrmIPAddress.objects.get(id=ip_pk) + _ipaddr.dns_name = attrs.get("dns_name") or "" + _ipaddr.custom_field_data.update({"dns_host_record_comment": attrs.get("description") or ""}) + try: + _ipaddr.validated_save() + except ValidationError as err: + diffsync.job.logger.warning( + f"Error with updating Host record data for IP Address: {addr_w_pfxl}-{ids['namespace']}. {err}" + ) + return None + else: + if diffsync.job.debug: + diffsync.job.logger.debug( + f"Creating IP Address from Host record data: {addr_w_pfxl}-{ids['namespace']}." + ) + try: + _ipaddr = _create_ip_address_common(diffsync, ids, attrs) + _ipaddr.dns_name = attrs.get("dns_name") or "" + _ipaddr.custom_field_data.update({"dns_host_record_comment": attrs.get("description") or ""}) + _ipaddr.validated_save() + diffsync.ipaddr_map[(addr_w_pfxl, ids["namespace"])] = _ipaddr.id + except ValidationError as err: + diffsync.job.logger.warning( + f"Error with creating IP Address from Host record data: {addr_w_pfxl}-{ids['namespace']}. 
{err}" + ) + return None + + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + def update(self, attrs): + """Update Host Record data on IPAddress object in Nautobot.""" + if self.diffsync.config.dns_record_type != DNSRecordTypeChoices.HOST_RECORD: + self.diffsync.job.logger.warning( + f"Can't update Host record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. Nautobot config is not set for Host record operations." # nosec: B608 + ) + return super().update(attrs) + + _ipaddr = OrmIPAddress.objects.get(id=self.pk) + if "dns_name" in attrs: + _ipaddr.dns_name = attrs["dns_name"] + if "description" in attrs: + _ipaddr.custom_field_data.update({"dns_host_record_comment": attrs.get("description") or ""}) + try: + _ipaddr.validated_save() + return super().update(attrs) + except ValidationError as err: + self.diffsync.job.logger.warning( + f"Error with updating Host record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. {err}" + ) + return None + + def delete(self): + """Delete Host Record data on IPAddress object in Nautobot.""" + if NautobotDeletableModelChoices.DNS_HOST_RECORD not in self.diffsync.config.nautobot_deletable_models: + return super().delete() + + if self.diffsync.config.dns_record_type != DNSRecordTypeChoices.HOST_RECORD: + self.diffsync.job.logger.warning( + f"Can't delete Host record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. Nautobot config is not set for Host record operations." 
+ ) + return super().delete() + + # Parent record has been already deleted + if _get_ip_address_ds_key(self) not in self.diffsync.ipaddr_map: + return super().delete() + + _ipaddr = OrmIPAddress.objects.get(id=self.pk) + _ipaddr.dns_name = "" + _ipaddr.custom_field_data.update({"dns_host_record_comment": ""}) + try: + _ipaddr.validated_save() + return super().delete() + except ValidationError as err: + self.diffsync.job.logger.warning( + f"Error with deleting Host record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. {err}" + ) + return None + + +class NautobotDnsPTRRecord(DnsPTRRecord): + """Nautobot implementation of the DnsPTRRecord Model.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create PTR Record data on IPAddress object in Nautobot.""" + addr_w_pfxl = f"{ids['address']}/{ids['prefix_length']}" + + if diffsync.config.dns_record_type != DNSRecordTypeChoices.A_AND_PTR_RECORD: + diffsync.job.logger.warning( + f"Can't create/update PTR record data for IP Address: {addr_w_pfxl}-{ids['namespace']}. Nautobot config is not set for PTR record operations." # nosec: B608 + ) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + ip_pk = diffsync.ipaddr_map.get((addr_w_pfxl, ids["namespace"])) + if ip_pk: + if diffsync.job.debug: + diffsync.job.logger.debug( + f"Adding PTR record data to an existing IP Address: {addr_w_pfxl}-{ids['namespace']}." + ) + _ipaddr = OrmIPAddress.objects.get(id=ip_pk) + _ipaddr.dns_name = attrs.get("dns_name") or "" + _ipaddr.custom_field_data.update({"dns_ptr_record_comment": attrs.get("description") or ""}) + try: + _ipaddr.validated_save() + except ValidationError as err: + diffsync.job.logger.warning( + f"Error with updating PTR record data for IP Address: {addr_w_pfxl}-{ids['namespace']}. {err}" + ) + return None + else: + # We don't allow creating IPs from PTR record only + diffsync.job.logger.warning( + f"Can't create PTR record on its own. 
Associated A record must be created for IP Address: {addr_w_pfxl}-{ids['namespace']}." + ) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + def update(self, attrs): + """Update PTR Record data on IPAddress object in Nautobot.""" + if self.diffsync.config.dns_record_type != DNSRecordTypeChoices.A_AND_PTR_RECORD: + self.diffsync.job.logger.warning( + f"Can't update PTR record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. Nautobot config is not set for PTR record operations." # nosec: B608 + ) + return super().update(attrs) + + _ipaddr = OrmIPAddress.objects.get(id=self.pk) + if "description" in attrs: + _ipaddr.custom_field_data.update({"dns_ptr_record_comment": attrs.get("description") or ""}) + try: + _ipaddr.validated_save() + return super().update(attrs) + except ValidationError as err: + self.diffsync.job.logger.warning( + f"Error with updating PTR record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. {err}" + ) + return None + + def delete(self): + """Delete PTR Record data on IPAddress object in Nautobot.""" + if NautobotDeletableModelChoices.DNS_PTR_RECORD not in self.diffsync.config.nautobot_deletable_models: + return super().delete() + + if self.diffsync.config.dns_record_type != DNSRecordTypeChoices.A_AND_PTR_RECORD: + self.diffsync.job.logger.warning( + f"Can't delete PTR record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. Nautobot config is not set for PTR record operations." 
+ ) + return super().delete() + + # Parent record has been already deleted + if _get_ip_address_ds_key(self) not in self.diffsync.ipaddr_map: + return super().delete() + + _ipaddr = OrmIPAddress.objects.get(id=self.pk) + _ipaddr.custom_field_data.update({"dns_ptr_record_comment": ""}) + try: + _ipaddr.validated_save() + return super().delete() + except ValidationError as err: + self.diffsync.job.logger.warning( + f"Error with deleting PTR record data for IP Address: {self.address}/{self.prefix_length}-{self.namespace}. {err}" + ) + return None diff --git a/nautobot_ssot/integrations/infoblox/filters.py b/nautobot_ssot/integrations/infoblox/filters.py new file mode 100644 index 000000000..122b4a8b9 --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/filters.py @@ -0,0 +1,28 @@ +"""Filtering implementation for SSOT Infoblox.""" + +import django_filters + +from django.db.models import Q +from nautobot.apps.filters import NautobotFilterSet + + +from .models import SSOTInfobloxConfig + + +class SSOTInfobloxConfigFilterSet(NautobotFilterSet): + """FilterSet for SSOTInfobloxConfig model.""" + + q = django_filters.CharFilter(method="search", label="Search") + + class Meta: + """Meta attributes for filter.""" + + model = SSOTInfobloxConfig + + fields = "__all__" + + def search(self, queryset, _name, value): + """String search of SSOTInfobloxConfig records.""" + if not value.strip(): + return queryset + return queryset.filter(Q(name__icontains=value)) # pylint: disable=unsupported-binary-operation diff --git a/nautobot_ssot/integrations/infoblox/forms.py b/nautobot_ssot/integrations/infoblox/forms.py new file mode 100644 index 000000000..c194cb18c --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/forms.py @@ -0,0 +1,75 @@ +"""Forms implementation for SSOT Infoblox.""" + +from django import forms + +from nautobot.extras.forms import NautobotModelForm, NautobotFilterForm +from nautobot.apps.forms import add_blank_choice, JSONField, StaticSelect2, 
StaticSelect2Multiple + +from .models import SSOTInfobloxConfig +from .choices import ( + FixedAddressTypeChoices, + DNSRecordTypeChoices, + InfobloxDeletableModelChoices, + NautobotDeletableModelChoices, +) + + +class SSOTInfobloxConfigForm(NautobotModelForm): # pylint: disable=too-many-ancestors + """SSOTInfobloxConfig creation/edit form.""" + + infoblox_sync_filters = JSONField( + required=True, label="Infoblox Sync Filters", help_text="Filters controlling data loaded from both systems." + ) + infoblox_dns_view_mapping = JSONField( + required=False, + label="Infoblox Network View to DNS Mapping", + help_text="Maps Network View to a single DNS View. This DNS View is used when creating DNS records.", + ) + cf_fields_ignore = JSONField( + required=False, + label="Extensible Attributes/Custom Fields to Ignore", + help_text="Provide list of Extensible Attributes and Custom Fields to ignore during sync." + " Assign lists to keys `extensible_attributes` and `custom_fields`.", + ) + fixed_address_type = forms.ChoiceField( + choices=FixedAddressTypeChoices, + required=True, + label="Fixed Address type", + widget=StaticSelect2(), + ) + dns_record_type = forms.ChoiceField( + choices=DNSRecordTypeChoices, + required=True, + label="DNS record type", + widget=StaticSelect2(), + ) + infoblox_deletable_models = forms.MultipleChoiceField( + required=False, + label="Models that can be deleted in Infoblox", + choices=add_blank_choice(InfobloxDeletableModelChoices), + widget=StaticSelect2Multiple(), + ) + nautobot_deletable_models = forms.MultipleChoiceField( + required=False, + label="Models that can be deleted in Nautobot", + choices=add_blank_choice(NautobotDeletableModelChoices), + widget=StaticSelect2Multiple(), + ) + + class Meta: + """Meta attributes for the SSOTInfobloxConfigForm class.""" + + model = SSOTInfobloxConfig + fields = "__all__" + + +class SSOTInfobloxConfigFilterForm(NautobotFilterForm): + """Filter form for SSOTInfobloxConfig filter searches.""" + + model = 
SSOTInfobloxConfig + + class Meta: + """Meta attributes for the SSOTInfobloxConfigFilterForm class.""" + + model = SSOTInfobloxConfig + fields = "__all__" diff --git a/nautobot_ssot/integrations/infoblox/jobs.py b/nautobot_ssot/integrations/infoblox/jobs.py index 64578d187..156dcf2f9 100644 --- a/nautobot_ssot/integrations/infoblox/jobs.py +++ b/nautobot_ssot/integrations/infoblox/jobs.py @@ -3,26 +3,61 @@ from diffsync.enum import DiffSyncFlags from django.templatetags.static import static from django.urls import reverse +from nautobot.extras.choices import SecretsGroupAccessTypeChoices, SecretsGroupSecretTypeChoices from nautobot.extras.jobs import BooleanVar +from nautobot.apps.jobs import ObjectVar from nautobot_ssot.jobs.base import DataMapping, DataSource, DataTarget +from nautobot_ssot.models import SSOTInfobloxConfig from .diffsync.adapters import infoblox, nautobot from .utils.client import InfobloxApi -from .constant import PLUGIN_CFG name = "SSoT - Infoblox DDI" # pylint: disable=invalid-name +def _get_infoblox_client_config(app_config, debug): + """Get Infoblox client config from the Infoblox config instance.""" + username = app_config.infoblox_instance.secrets_group.get_secret_value( + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME, + ) + password = app_config.infoblox_instance.secrets_group.get_secret_value( + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD, + ) + infoblox_client_config = { + "url": app_config.infoblox_instance.remote_url, + "username": username, + "password": password, + "verify_ssl": app_config.infoblox_instance.verify_ssl, + "wapi_version": app_config.infoblox_wapi_version, + "timeout": app_config.infoblox_instance.timeout, + "debug": debug, + "network_view_to_dns_map": app_config.infoblox_dns_view_mapping, + } + + return infoblox_client_config + + class InfobloxDataSource(DataSource): """Infoblox SSoT Data 
Source.""" debug = BooleanVar(description="Enable for verbose debug logging.") + config = ObjectVar( + model=SSOTInfobloxConfig, + display_field="SSOT Infoblox config", + required=True, + query_params={ + "enable_sync_to_nautobot": True, + "job_enabled": True, + }, + ) def __init__(self): """Initialize InfobloxDataSource.""" super().__init__() - self.diffsync_flags = DiffSyncFlags.CONTINUE_ON_FAILURE | DiffSyncFlags.SKIP_UNMATCHED_DST + self.diffsync_flags = DiffSyncFlags.CONTINUE_ON_FAILURE class Meta: # pylint: disable=too-few-public-methods """Information about the Job.""" @@ -36,6 +71,7 @@ class Meta: # pylint: disable=too-few-public-methods def data_mappings(cls): """Show mapping of models between Infoblox and Nautobot.""" return ( + DataMapping("network_view", None, "Namespace", reverse("ipam:namespace_list")), DataMapping("network", None, "Prefix", reverse("ipam:prefix_list")), DataMapping("ipaddress", None, "IP Address", reverse("ipam:ipaddress_list")), DataMapping("vlan", None, "VLAN", reverse("ipam:vlan_list")), @@ -45,15 +81,16 @@ def data_mappings(cls): def load_source_adapter(self): """Load Infoblox data.""" self.logger.info("Connecting to Infoblox") - client = InfobloxApi() - self.source_adapter = infoblox.InfobloxAdapter(job=self, sync=self.sync, conn=client) + client_config = _get_infoblox_client_config(self.config, self.debug) + client = InfobloxApi(**client_config) + self.source_adapter = infoblox.InfobloxAdapter(job=self, sync=self.sync, conn=client, config=self.config) self.logger.info("Loading data from Infoblox...") self.source_adapter.load() def load_target_adapter(self): """Load Nautobot data.""" self.logger.info("Connecting to Nautobot...") - self.target_adapter = nautobot.NautobotAdapter(job=self, sync=self.sync) + self.target_adapter = nautobot.NautobotAdapter(job=self, sync=self.sync, config=self.config) self.logger.info("Loading data from Nautobot...") self.target_adapter.load() @@ -61,6 +98,10 @@ def run(self, dryrun, memory_profiling, 
debug, *args, **kwargs): # pylint: disa """Perform data synchronization.""" self.debug = debug self.dryrun = dryrun + self.config = kwargs.get("config") + if not self.config.enable_sync_to_nautobot: + self.logger.error("Can't run sync to Nautobot, provided config doesn't have it enabled...") + raise ValueError("Config not enabled for sync to Nautobot.") self.memory_profiling = memory_profiling super().run(dryrun=self.dryrun, memory_profiling=self.memory_profiling, *args, **kwargs) @@ -69,11 +110,20 @@ class InfobloxDataTarget(DataTarget): """Infoblox SSoT Data Target.""" debug = BooleanVar(description="Enable for verbose debug logging.") + config = ObjectVar( + model=SSOTInfobloxConfig, + display_field="SSOT Infoblox config", + required=True, + query_params={ + "enable_sync_to_infoblox": True, + "job_enabled": True, + }, + ) def __init__(self): """Initialize InfobloxDataTarget.""" super().__init__() - self.diffsync_flags = DiffSyncFlags.CONTINUE_ON_FAILURE | DiffSyncFlags.SKIP_UNMATCHED_DST + self.diffsync_flags = DiffSyncFlags.CONTINUE_ON_FAILURE class Meta: # pylint: disable=too-few-public-methods """Information about the Job.""" @@ -87,6 +137,7 @@ class Meta: # pylint: disable=too-few-public-methods def data_mappings(cls): """Show mapping of models between Nautobot and Infoblox.""" return ( + DataMapping("Namespace", reverse("ipam:namespace_list"), "network_view", None), DataMapping("Prefix", reverse("ipam:prefix_list"), "network", None), DataMapping("IP Address", reverse("ipam:ipaddress_list"), "ipaddress", None), DataMapping("VLAN", reverse("ipam:vlan_list"), "vlan", None), @@ -96,15 +147,16 @@ def data_mappings(cls): def load_source_adapter(self): """Load Nautobot data.""" self.logger.info("Connecting to Nautobot...") - self.source_adapter = nautobot.NautobotAdapter(job=self, sync=self.sync) + self.source_adapter = nautobot.NautobotAdapter(job=self, sync=self.sync, config=self.config) self.logger.info("Loading data from Nautobot...") 
self.source_adapter.load() def load_target_adapter(self): """Load Infoblox data.""" self.logger.info("Connecting to Infoblox") - client = InfobloxApi() - self.target_adapter = infoblox.InfobloxAdapter(job=self, sync=self.sync, conn=client) + client_config = _get_infoblox_client_config(self.config, self.debug) + client = InfobloxApi(**client_config) + self.target_adapter = infoblox.InfobloxAdapter(job=self, sync=self.sync, conn=client, config=self.config) self.logger.info("Loading data from Infoblox...") self.target_adapter.load() @@ -112,11 +164,13 @@ def run(self, dryrun, memory_profiling, debug, *args, **kwargs): # pylint: disa """Perform data synchronization.""" self.debug = debug self.dryrun = dryrun + self.config = kwargs.get("config") + # Additional guard against launching sync to Infoblox with config that doesn't allow it + if not self.config.enable_sync_to_infoblox: + self.logger.error("Can't run sync to Infoblox, provided config doesn't have it enabled...") + raise ValueError("Config not enabled for sync to Infoblox.") self.memory_profiling = memory_profiling super().run(dryrun=self.dryrun, memory_profiling=self.memory_profiling, *args, **kwargs) -jobs = [InfobloxDataSource] - -if PLUGIN_CFG["enable_sync_to_infoblox"]: - jobs.append(InfobloxDataTarget) +jobs = [InfobloxDataSource, InfobloxDataTarget] diff --git a/nautobot_ssot/integrations/infoblox/models.py b/nautobot_ssot/integrations/infoblox/models.py new file mode 100644 index 000000000..7cf74891e --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/models.py @@ -0,0 +1,310 @@ +"""Models implementation for SSOT Infoblox.""" + +import ipaddress + +from django.core.exceptions import ValidationError +from django.core.serializers.json import DjangoJSONEncoder +from django.db import models + +try: + from nautobot.apps.constants import CHARFIELD_MAX_LENGTH +except ImportError: + CHARFIELD_MAX_LENGTH = 255 + +from nautobot.core.models.generics import PrimaryModel +from nautobot.extras.choices import 
SecretsGroupAccessTypeChoices, SecretsGroupSecretTypeChoices +from nautobot.extras.models import SecretsGroupAssociation + +from nautobot_ssot.integrations.infoblox.choices import ( + DNSRecordTypeChoices, + FixedAddressTypeChoices, + InfobloxDeletableModelChoices, + NautobotDeletableModelChoices, +) + + +def _get_default_sync_filters(): + """Provides default value for SSOTInfobloxConfig infoblox_sync_filters field.""" + return [{"network_view": "default"}] + + +def _get_default_cf_fields_ignore(): + """Provides default value for SSOTInfobloxConfig cf_fields_ignore field.""" + return {"extensible_attributes": [], "custom_fields": []} + + +class SSOTInfobloxConfig(PrimaryModel): # pylint: disable=too-many-ancestors + """SSOT Infoblox Configuration model.""" + + name = models.CharField(max_length=CHARFIELD_MAX_LENGTH, unique=True) + description = models.CharField( + max_length=CHARFIELD_MAX_LENGTH, + blank=True, + ) + default_status = models.ForeignKey( + to="extras.Status", + on_delete=models.PROTECT, + verbose_name="Default Object Status", + help_text="Default Object Status", + ) + infoblox_instance = models.ForeignKey( + to="extras.ExternalIntegration", + on_delete=models.PROTECT, + verbose_name="Infoblox Instance Config", + help_text="Infoblox Instance", + ) + infoblox_wapi_version = models.CharField( + max_length=CHARFIELD_MAX_LENGTH, + default="v2.12", + verbose_name="Infoblox WAPI version", + ) + enable_sync_to_infoblox = models.BooleanField( + default=False, verbose_name="Sync to Infoblox", help_text="Enable syncing of data from Nautobot to Infoblox." + ) + enable_sync_to_nautobot = models.BooleanField( + default=True, verbose_name="Sync to Nautobot", help_text="Enable syncing of data from Infoblox to Nautobot." 
+    )
+    import_ip_addresses = models.BooleanField(
+        default=False,
+        verbose_name="Import IP Addresses",
+    )
+    import_subnets = models.BooleanField(
+        default=False,
+        verbose_name="Import Networks",
+    )
+    import_vlan_views = models.BooleanField(
+        default=False,
+        verbose_name="Import VLAN Views",
+    )
+    import_vlans = models.BooleanField(
+        default=False,
+        verbose_name="Import VLANs",
+    )
+    infoblox_sync_filters = models.JSONField(default=_get_default_sync_filters, encoder=DjangoJSONEncoder)
+    infoblox_dns_view_mapping = models.JSONField(default=dict, encoder=DjangoJSONEncoder, blank=True)
+    cf_fields_ignore = models.JSONField(default=_get_default_cf_fields_ignore, encoder=DjangoJSONEncoder, blank=True)
+    import_ipv4 = models.BooleanField(
+        default=True,
+        verbose_name="Import IPv4",
+    )
+    import_ipv6 = models.BooleanField(
+        default=False,
+        verbose_name="Import IPv6",
+    )
+    dns_record_type = models.CharField(
+        max_length=CHARFIELD_MAX_LENGTH,
+        default=DNSRecordTypeChoices.HOST_RECORD,
+        choices=DNSRecordTypeChoices,
+        verbose_name="DNS record type",
+        help_text="Choose what type of Infoblox DNS record to create for IP Addresses.",
+    )
+    fixed_address_type = models.CharField(
+        max_length=CHARFIELD_MAX_LENGTH,
+        default=FixedAddressTypeChoices.DONT_CREATE_RECORD,
+        choices=FixedAddressTypeChoices,
+        help_text="Choose what type of Infoblox fixed IP Address record to create.",
+    )
+    job_enabled = models.BooleanField(
+        default=False,
+        verbose_name="Enabled for Sync Job",
+        help_text="Enable use of this configuration in the sync jobs.",
+    )
+    infoblox_deletable_models = models.JSONField(
+        encoder=DjangoJSONEncoder,
+        default=list,
+        blank=True,
+        help_text="Model types that can be deleted in Infoblox.",
+    )
+    nautobot_deletable_models = models.JSONField(
+        encoder=DjangoJSONEncoder, default=list, blank=True, help_text="Model types that can be deleted in Nautobot."
+ ) + + class Meta: + """Meta class for SSOTInfobloxConfig.""" + + verbose_name = "SSOT Infoblox Config" + verbose_name_plural = "SSOT Infoblox Configs" + + def __str__(self): + """String representation of singleton instance.""" + return self.name + + def _clean_infoblox_sync_filters(self): # pylint: disable=too-many-branches + """Performs validation of the infoblox_sync_filters field.""" + allowed_keys = {"network_view", "prefixes_ipv4", "prefixes_ipv6"} + + if not isinstance(self.infoblox_sync_filters, list): + raise ValidationError({"infoblox_sync_filters": "Sync filters must be a list."}) + + if len(self.infoblox_sync_filters) == 0: + raise ValidationError( + { + "infoblox_sync_filters": 'At least one filter must be defined. You can use the default one: [{"network_view": "default"}]' + } + ) + + network_views = set() + for sync_filter in self.infoblox_sync_filters: + if not isinstance(sync_filter, dict): + raise ValidationError({"infoblox_sync_filters": "Sync filter must be a dict."}) + invalid_keys = set(sync_filter.keys()) - allowed_keys + if invalid_keys: + raise ValidationError( + {"infoblox_sync_filters": f"Invalid keys found in the sync filter: {', '.join(invalid_keys)}."} + ) + + if "network_view" not in sync_filter: + raise ValidationError({"infoblox_sync_filters": "Sync filter must have `network_view` key defined."}) + network_view = sync_filter["network_view"] + if not isinstance(network_view, str): + raise ValidationError({"infoblox_sync_filters": "Value of the `network_view` key must be a string."}) + + if network_view in network_views: + raise ValidationError( + { + "infoblox_sync_filters": f"Duplicate value for the `network_view` found: {sync_filter['network_view']}." 
+ } + ) + network_views.add(network_view) + + if "prefixes_ipv4" in sync_filter: + if not isinstance(sync_filter["prefixes_ipv4"], list): + raise ValidationError({"infoblox_sync_filters": "Value of the `prefixes_ipv4` key must be a list."}) + if not sync_filter["prefixes_ipv4"]: + raise ValidationError( + {"infoblox_sync_filters": "Value of the `prefixes_ipv4` key must not be an empty list."} + ) + for prefix in sync_filter["prefixes_ipv4"]: + try: + if "/" not in prefix: + raise ValidationError( + { + "infoblox_sync_filters": f"IPv4 prefix must have a prefix length defined using `/` format: {prefix}." + } + ) + ipaddress.IPv4Network(prefix) + except (ValueError, TypeError) as error: + raise ValidationError( # pylint: disable=raise-missing-from + {"infoblox_sync_filters": f"IPv4 prefix parsing error: {str(error)}."} + ) + + if "prefixes_ipv6" in sync_filter: + if not isinstance(sync_filter["prefixes_ipv6"], list): + raise ValidationError({"infoblox_sync_filters": "Value of the `prefixes_ipv6` key must be a list."}) + if not sync_filter["prefixes_ipv6"]: + raise ValidationError( + {"infoblox_sync_filters": "Value of the `prefixes_ipv6` key must not be an empty list."} + ) + for prefix in sync_filter["prefixes_ipv6"]: + try: + if "/" not in prefix: + raise ValidationError( + { + "infoblox_sync_filters": f"IPv6 prefix must have a prefix length defined using `/` format: {prefix}." 
+ } + ) + ipaddress.IPv6Network(prefix) + except (ValueError, TypeError) as error: + raise ValidationError( # pylint: disable=raise-missing-from + {"infoblox_sync_filters": f"IPv6 prefix parsing error: {str(error)}."} + ) + + def _clean_infoblox_instance(self): + """Performs validation of the infoblox_instance field.""" + if not self.infoblox_instance.secrets_group: + raise ValidationError({"infoblox_instance": "Infoblox instance must have Secrets groups assigned."}) + try: + self.infoblox_instance.secrets_group.get_secret_value( + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME, + ) + except SecretsGroupAssociation.DoesNotExist: + raise ValidationError( # pylint: disable=raise-missing-from + { + "infoblox_instance": "Secrets group for the Infoblox instance must have secret with type Username and access type REST." + } + ) + try: + self.infoblox_instance.secrets_group.get_secret_value( + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD, + ) + except SecretsGroupAssociation.DoesNotExist: + raise ValidationError( # pylint: disable=raise-missing-from + { + "infoblox_instance": "Secrets group for the Infoblox instance must have secret with type Password and access type REST." 
+ } + ) + + def _clean_import_ip(self): + """Performs validation of the import_ipv* fields.""" + if not (self.import_ipv4 or self.import_ipv6): + raise ValidationError( + { + "import_ipv4": "At least one of `import_ipv4` or `import_ipv6` must be set to True.", + "import_ipv6": "At least one of `import_ipv4` or `import_ipv6` must be set to True.", + } + ) + + def _clean_infoblox_dns_view_mapping(self): + """Performs validation of the infoblox_dns_view_mapping field.""" + if not isinstance(self.infoblox_dns_view_mapping, dict): + raise ValidationError( + { + "infoblox_dns_view_mapping": "`infoblox_dns_view_mapping` must be a dictionary mapping network view names to dns view names.", + }, + ) + + def _clean_cf_fields_ignore(self): + """Performs validation of the cf_fields_ignore field.""" + if not isinstance(self.cf_fields_ignore, dict): + raise ValidationError( + { + "cf_fields_ignore": "`cf_fields_ignore` must be a dictionary.", + }, + ) + for key, value in self.cf_fields_ignore.items(): + if key not in ( + "extensible_attributes", + "custom_fields", + ): + raise ValidationError( + { + "cf_fields_ignore": f"Invalid key name `{key}`. 
Only `extensible_attributes` and `custom_fields` are allowed.", + }, + ) + if not isinstance(value, list) or {type(el) for el in value} - {str}: + raise ValidationError( + { + "cf_fields_ignore": f"Value of key `{key}` must be a list of strings.", + }, + ) + + def _clean_deletable_model_types(self): + """Performs validation of infoblox_deletable_models and nautobot_deletable_models.""" + for model in self.infoblox_deletable_models: + if model not in InfobloxDeletableModelChoices.values(): + raise ValidationError( + { + "infoblox_deletable_models": f"Model `{model}` is not a valid choice.", + }, + ) + + for model in self.nautobot_deletable_models: + if model not in NautobotDeletableModelChoices.values(): + raise ValidationError( + { + "nautobot_deletable_models": f"Model `{model}` is not a valid choice.", + }, + ) + + def clean(self): + """Clean method for SSOTInfobloxConfig.""" + super().clean() + + self._clean_infoblox_sync_filters() + self._clean_infoblox_instance() + self._clean_import_ip() + self._clean_infoblox_dns_view_mapping() + self._clean_cf_fields_ignore() + self._clean_deletable_model_types() diff --git a/nautobot_ssot/integrations/infoblox/signals.py b/nautobot_ssot/integrations/infoblox/signals.py index 57667afdb..9fedb522a 100644 --- a/nautobot_ssot/integrations/infoblox/signals.py +++ b/nautobot_ssot/integrations/infoblox/signals.py @@ -2,9 +2,21 @@ # pylint: disable=duplicate-code +import ipaddress + from nautobot.core.signals import nautobot_database_ready -from nautobot.extras.choices import CustomFieldTypeChoices, RelationshipTypeChoices +from nautobot.extras.choices import ( + CustomFieldTypeChoices, + RelationshipTypeChoices, + SecretsGroupAccessTypeChoices, + SecretsGroupSecretTypeChoices, +) +from django.conf import settings from nautobot_ssot.integrations.infoblox.constant import TAG_COLOR +from nautobot_ssot.integrations.infoblox.choices import DNSRecordTypeChoices, FixedAddressTypeChoices + + +config = 
settings.PLUGINS_CONFIG["nautobot_ssot"] def register_signals(sender): @@ -12,7 +24,9 @@ def register_signals(sender): nautobot_database_ready.connect(nautobot_database_ready_callback, sender=sender) -def nautobot_database_ready_callback(sender, *, apps, **kwargs): # pylint: disable=unused-argument,too-many-locals +def nautobot_database_ready_callback( + sender, *, apps, **kwargs +): # pylint: disable=unused-argument,too-many-locals,too-many-statements """Create Tag and CustomField to note System of Record for SSoT. Callback function triggered by the nautobot_database_ready signal when the Nautobot database is fully ready. @@ -22,10 +36,17 @@ def nautobot_database_ready_callback(sender, *, apps, **kwargs): # pylint: disa CustomField = apps.get_model("extras", "CustomField") Prefix = apps.get_model("ipam", "Prefix") IPAddress = apps.get_model("ipam", "IPAddress") + Namespace = apps.get_model("ipam", "Namespace") Tag = apps.get_model("extras", "Tag") Relationship = apps.get_model("extras", "Relationship") + ExternalIntegration = apps.get_model("extras", "ExternalIntegration") + Secret = apps.get_model("extras", "Secret") + SecretsGroup = apps.get_model("extras", "SecretsGroup") + SecretsGroupAssociation = apps.get_model("extras", "SecretsGroupAssociation") + Status = apps.get_model("extras", "Status") VLAN = apps.get_model("ipam", "VLAN") VLANGroup = apps.get_model("ipam", "VLANGroup") + SSOTInfobloxConfig = apps.get_model("nautobot_ssot", "SSOTInfobloxConfig") tag_sync_from_infoblox, _ = Tag.objects.get_or_create( name="SSoT Synced from Infoblox", @@ -35,7 +56,7 @@ def nautobot_database_ready_callback(sender, *, apps, **kwargs): # pylint: disa "color": TAG_COLOR, }, ) - for model in [IPAddress, Prefix, VLAN]: + for model in [IPAddress, Namespace, Prefix, VLAN]: tag_sync_from_infoblox.content_types.add(ContentType.objects.get_for_model(model)) tag_sync_to_infoblox, _ = Tag.objects.get_or_create( name="SSoT Synced to Infoblox", @@ -65,6 +86,51 @@ def 
nautobot_database_ready_callback(sender, *, apps, **kwargs): # pylint: disa ) range_custom_field.content_types.add(ContentType.objects.get_for_model(Prefix)) + mac_address_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="mac_address", + defaults={ + "label": "MAC Address", + }, + ) + mac_address_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + fixed_address_comment_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="fixed_address_comment", + defaults={ + "label": "Fixed Address Comment", + }, + ) + fixed_address_comment_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + dns_a_record_comment_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="dns_a_record_comment", + defaults={ + "label": "DNS A Record Comment", + }, + ) + dns_a_record_comment_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + dns_host_record_comment_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="dns_host_record_comment", + defaults={ + "label": "DNS Host Record Comment", + }, + ) + dns_host_record_comment_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + dns_ptr_record_comment_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="dns_ptr_record_comment", + defaults={ + "label": "DNS PTR Record Comment", + }, + ) + dns_ptr_record_comment_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + # add Prefix -> VLAN Relationship relationship_dict = { "label": "Prefix -> VLAN", @@ -76,3 +142,121 @@ def nautobot_database_ready_callback(sender, *, apps, **kwargs): # pylint: disa "destination_label": "VLAN", } Relationship.objects.get_or_create(label=relationship_dict["label"], defaults=relationship_dict) + + # 
Migrate existing configuration to a configuration object + if not SSOTInfobloxConfig.objects.exists(): + default_status_name = str(config.get("infoblox_default_status", "")) + found_status = Status.objects.filter(name=default_status_name) + if found_status.exists(): + default_status = found_status.first() + else: + default_status, _ = Status.objects.get_or_create(name="Active") + + try: + infoblox_request_timeout = int(config.get("infoblox_request_timeout", 60)) + except ValueError: + infoblox_request_timeout = 60 + + infoblox_sync_filters = _get_sync_filters() + + secrets_group, _ = SecretsGroup.objects.get_or_create(name="InfobloxSSOTDefaultSecretGroup") + infoblox_username, _ = Secret.objects.get_or_create( + name="Infoblox Username - Default", + defaults={ + "provider": "environment-variable", + "parameters": {"variable": "NAUTOBOT_SSOT_INFOBLOX_USERNAME"}, + }, + ) + infoblox_password, _ = Secret.objects.get_or_create( + name="Infoblox Password - Default", + defaults={ + "provider": "environment-variable", + "parameters": {"variable": "NAUTOBOT_SSOT_INFOBLOX_PASSWORD"}, + }, + ) + SecretsGroupAssociation.objects.get_or_create( + secrets_group=secrets_group, + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME, + defaults={ + "secret": infoblox_username, + }, + ) + SecretsGroupAssociation.objects.get_or_create( + secrets_group=secrets_group, + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD, + defaults={ + "secret": infoblox_password, + }, + ) + external_integration, _ = ExternalIntegration.objects.get_or_create( + name="DefaultInfobloxInstance", + defaults={ + "remote_url": str(config.get("infoblox_url", "https://replace.me.local")), + "secrets_group": secrets_group, + "verify_ssl": bool(config.get("infoblox_verify_ssl", True)), + "timeout": infoblox_request_timeout, + }, + ) + + SSOTInfobloxConfig.objects.create( + 
name="InfobloxConfigDefault", + description="Auto-generated default configuration.", + default_status=default_status, + infoblox_wapi_version=str(config.get("infoblox_wapi_version", "v2.12")), + infoblox_instance=external_integration, + enable_sync_to_infoblox=bool(config.get("infoblox_enable_sync_to_infoblox", False)), + enable_sync_to_nautobot=True, + import_ip_addresses=bool(config.get("infoblox_import_objects_ip_addresses", False)), + import_subnets=bool(config.get("infoblox_import_objects_subnets", False)), + import_vlan_views=bool(config.get("infoblox_import_objects_vlan_views", False)), + import_vlans=bool(config.get("infoblox_import_objects_vlans", False)), + import_ipv4=True, + import_ipv6=bool(config.get("infoblox_import_objects_subnets_ipv6", False)), + job_enabled=True, + infoblox_sync_filters=infoblox_sync_filters, + infoblox_dns_view_mapping={}, + cf_fields_ignore={"extensible_attributes": [], "custom_fields": []}, + fixed_address_type=FixedAddressTypeChoices.DONT_CREATE_RECORD, + dns_record_type=DNSRecordTypeChoices.HOST_RECORD, + ) + + +def _get_sync_filters(): + """Build sync filters from the existing config.""" + subnets_to_import = config.get("infoblox_import_subnets", []) + default_sync_filters = [{"network_view": "default"}] + ipv4_subnets = [] + ipv6_subnets = [] + if not subnets_to_import: + return default_sync_filters + if not isinstance(subnets_to_import, list): + return default_sync_filters + for subnet in subnets_to_import: + try: + ipaddress.IPv4Network(subnet) + ipv4_subnets.append(subnet) + except (ValueError, TypeError): + pass + try: + ipaddress.IPv6Network(subnet) + ipv6_subnets.append(subnet) + except (ValueError, TypeError): + pass + + sync_filter = {} + if ipv4_subnets: + sync_filter["prefixes_ipv4"] = ipv4_subnets + if ipv6_subnets: + sync_filter["prefixes_ipv6"] = ipv6_subnets + + network_view = str(config.get("infoblox_network_view", "")) + if network_view: + sync_filter["network_view"] = network_view + else: + 
sync_filter["network_view"] = "default" + + sync_filters = [sync_filter] + + return sync_filters diff --git a/nautobot_ssot/integrations/infoblox/tables.py b/nautobot_ssot/integrations/infoblox/tables.py new file mode 100644 index 000000000..58542c984 --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/tables.py @@ -0,0 +1,41 @@ +"""Tables implementation for SSOT Infoblox.""" + +import django_tables2 as tables + +from nautobot.apps.tables import BaseTable, BooleanColumn, ButtonsColumn + +from .models import SSOTInfobloxConfig + + +class SSOTInfobloxConfigTable(BaseTable): + """Table for SSOTInfobloxConfig.""" + + name = tables.LinkColumn() + infoblox_url = tables.Column(accessor="infoblox_instance__remote_url") + enable_sync_to_infoblox = BooleanColumn(orderable=False) + enable_sync_to_nautobot = BooleanColumn(orderable=False) + import_subnets = BooleanColumn(orderable=False) + import_ip_addresses = BooleanColumn(orderable=False) + import_vlan_views = BooleanColumn(orderable=False) + import_vlans = BooleanColumn(orderable=False) + import_ipv4 = BooleanColumn(orderable=False) + import_ipv6 = BooleanColumn(orderable=False) + job_enabled = BooleanColumn(orderable=False) + actions = ButtonsColumn(SSOTInfobloxConfig, buttons=("changelog", "edit", "delete")) + + class Meta(BaseTable.Meta): + """Meta attributes.""" + + model = SSOTInfobloxConfig + fields = ( # pylint: disable=nb-use-fields-all + "name", + "infoblox_url", + "enable_sync_to_infoblox", + "import_subnets", + "import_ip_addresses", + "import_vlan_views", + "import_vlans", + "import_ipv4", + "import_ipv6", + "job_enabled", + ) diff --git a/nautobot_ssot/integrations/infoblox/urls.py b/nautobot_ssot/integrations/infoblox/urls.py new file mode 100644 index 000000000..0d3d67ea5 --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/urls.py @@ -0,0 +1,27 @@ +"""URL patterns for nautobot-ssot-infoblox.""" + +from django.urls import path +from nautobot.apps.urls import NautobotUIViewSetRouter + +from . 
import views +from . import models + +router = NautobotUIViewSetRouter() +router.register("config/infoblox", viewset=views.SSOTInfobloxConfigUIViewSet) + +urlpatterns = [ + path( + "config/infoblox/<uuid:pk>/changelog/", + views.SSOTInfobloxConfigChangeLogView.as_view(), + name="ssotinfobloxconfig_changelog", + kwargs={"model": models.SSOTInfobloxConfig}, + ), + path( + "config/infoblox/<uuid:pk>/notes/", + views.SSOTInfobloxConfigNotesView.as_view(), + name="ssotinfobloxconfig_notes", + kwargs={"model": models.SSOTInfobloxConfig}, + ), +] + +urlpatterns += router.urls diff --git a/nautobot_ssot/integrations/infoblox/utils/client.py b/nautobot_ssot/integrations/infoblox/utils/client.py index bae55d40a..95885b34a 100644 --- a/nautobot_ssot/integrations/infoblox/utils/client.py +++ b/nautobot_ssot/integrations/infoblox/utils/client.py @@ -2,20 +2,21 @@ from __future__ import annotations -import json import ipaddress +import json import logging import re import urllib.parse from collections import defaultdict +from functools import lru_cache from typing import Optional + import requests +from dns import reversename from requests.auth import HTTPBasicAuth -from requests.exceptions import HTTPError from requests.compat import urljoin -from dns import reversename -from nautobot.core.settings_funcs import is_truthy -from nautobot_ssot.integrations.infoblox.constant import PLUGIN_CFG +from requests.exceptions import HTTPError + from nautobot_ssot.integrations.infoblox.utils.diffsync import get_ext_attr_dict logger = logging.getLogger("nautobot.ssot.infoblox") @@ -35,26 +36,31 @@ def parse_url(address): return urllib.parse.urlparse(address) -def get_default_ext_attrs(review_list: list) -> dict: +def get_default_ext_attrs(review_list: list, excluded_attrs: Optional[list] = None) -> dict: """Determine the default Extensibility Attributes for an object being processed. Args: review_list (list): The list of objects that need to be reviewed to gather default Extensibility Attributes. 
+ excluded_attrs (list): List of Extensibility Attributes to exclude. Returns: dict: Dictionary of default Extensibility Attributes for a VLAN View, VLANs, Prefixes, or IP Addresses. """ + if excluded_attrs is None: + excluded_attrs = [] default_ext_attrs = {} for item in review_list: - pf_ext_attrs = get_ext_attr_dict(extattrs=item.get("extattrs", {})) - for attr in pf_ext_attrs: + normalized_ext_attrs = get_ext_attr_dict(extattrs=item.get("extattrs", {}), excluded_attrs=excluded_attrs) + for attr in normalized_ext_attrs: + if attr in excluded_attrs: + continue if attr not in default_ext_attrs: default_ext_attrs[attr] = None return default_ext_attrs def get_dns_name(possible_fqdn: str) -> str: - """Validate passed FQDN and returns if found. + """Validates passed FQDN and returns if found. Args: possible_fqdn (str): Potential string to be used for IP Address dns_name. @@ -98,11 +104,14 @@ class InfobloxApi: # pylint: disable=too-many-public-methods, too-many-instanc def __init__( self, - url=PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_URL"), - username=PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_USERNAME"), - password=PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_PASSWORD"), - verify_ssl=is_truthy(PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_VERIFY_SSL")), - wapi_version=PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_WAPI_VERSION"), + url, + username, + password, + verify_ssl, + wapi_version, + timeout, + debug=False, + network_view_to_dns_map=None, cookie=None, ): # pylint: disable=too-many-arguments """Initialize Infoblox class.""" @@ -116,7 +125,17 @@ def __init__( self.url = parsed_url.geturl() self.auth = HTTPBasicAuth(username, password) self.wapi_version = wapi_version + self.timeout = timeout self.session = self._init_session(verify_ssl=verify_ssl, cookie=cookie) + # Used to select correct DNS View when creating DNS records + self.network_view_to_dns_map = {} + if network_view_to_dns_map and isinstance(network_view_to_dns_map, dict): + self.network_view_to_dns_map.update(network_view_to_dns_map) + # Change 
logging level to Debug if Debug checkbox is ticked in the Job form + logging_level = logging.DEBUG if debug else logging.INFO + logger.setLevel(logging_level) + for handler in logger.handlers: + handler.setLevel(logging_level) def _init_session(self, verify_ssl: bool, cookie: Optional[dict]) -> requests.Session: """Initialize requests Session object that is used across all the API calls. @@ -160,15 +179,22 @@ def _request(self, method, path, **kwargs): else: self.session.auth = self.auth - resp = self.session.request(method, url, timeout=PLUGIN_CFG["infoblox_request_timeout"], **kwargs) + resp = self.session.request(method, url, timeout=self.timeout, **kwargs) # Infoblox provides meaningful error messages for error codes >= 400 + err_msg = "HTTP error while talking to Infoblox API." if resp.status_code >= 400: try: err_msg = resp.json() except json.decoder.JSONDecodeError: err_msg = resp.text logger.error(err_msg) - resp.raise_for_status() + # Ensure Job logs display error messages retrieved from the Infoblox API response. + # Default error message does not have enough context. + try: + resp.raise_for_status() + except HTTPError as err: + exc_msg = f"{str(err)}. {err_msg}" + raise HTTPError(exc_msg, response=err.response) from err return resp def _delete(self, resource): @@ -185,14 +211,14 @@ def _delete(self, resource): """ response = self._request("DELETE", resource) try: - logger.info(response.json()) + logger.debug(response.json()) return response.json() except json.decoder.JSONDecodeError: - logger.info(response.text) + logger.error(response.text) return response.text def _update(self, resource, **params): - """Delete a resource from Infoblox. + """Update a resource in Infoblox. 
Args: resource (str): Resource to update @@ -206,17 +232,20 @@ def _update(self, resource, **params): """ response = self._request("PUT", path=resource, params=params) try: - logger.info(response.json()) + logger.debug(response.json()) return response.json() except json.decoder.JSONDecodeError: - logger.info(response.text) + logger.error(response.text) return response.text - def _get_network_ref(self, prefix): # pylint: disable=inconsistent-return-statements + def _get_network_ref( + self, prefix, network_view: Optional[str] = None + ): # pylint: disable=inconsistent-return-statements """Fetch the _ref of a prefix resource. Args: prefix (str): IPv4 Prefix to fetch the _ref for. + network_view (str): Network View of the prefix to fetch the _ref for. Returns: (str) network _ref or None @@ -224,15 +253,29 @@ def _get_network_ref(self, prefix): # pylint: disable=inconsistent-return-state Returns Response: "network/ZG5zLm5ldHdvcmskMTkyLjAuMi4wLzI0LzA:192.0.2.0/24/default" """ - for item in self.get_all_subnets(prefix): - if item["network"] == prefix: - return item["_ref"] + url_path = "network" + params = {"network": prefix, "_return_as_object": 1} + if network_view: + params["network_view"] = network_view + response = self._request("GET", url_path, params=params) + try: + logger.debug(response.json()) + results = response.json().get("result") + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + if results: + return results[0].get("_ref") + return None - def _get_network_container_ref(self, prefix): # pylint: disable=inconsistent-return-statements + def _get_network_container_ref( + self, prefix, network_view: Optional[str] = None + ): # pylint: disable=inconsistent-return-statements """Fetch the _ref of a networkcontainer resource. Args: prefix (str): IPv4 Prefix to fetch the _ref for. + network_view (str): Network View of the prefix to fetch the _ref for. 
Returns: (str) networkcontainer _ref or None @@ -240,12 +283,23 @@ def _get_network_container_ref(self, prefix): # pylint: disable=inconsistent-re Returns Response: "networkcontainer/ZG5zLm5ldHdvcmtfY29udGFpbmVyJDE5Mi4xNjguMi4wLzI0LzA:192.168.2.0/24/default" """ - for item in self.get_network_containers(): - if item["network"] == prefix: - return item["_ref"] + url_path = "networkcontainer" + params = {"network": prefix, "_return_as_object": 1} + if network_view: + params["network_view"] = network_view + response = self._request("GET", url_path, params=params) + try: + logger.debug(response.json()) + results = response.json().get("result") + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + if results and len(results): + return results[0] + return None def get_all_ipv4address_networks(self, prefixes): - """Get all used / unused IPv4 addresses within the supplied network. + """Get all used / unused IPv4 addresses within the supplied networks. Args: prefixes (List[tuple]): List of Network prefixes and associated network view - ('10.220.0.0/22', 'default') @@ -319,7 +373,7 @@ def get_ipaddrs(url_path: str, data: dict) -> list: try: response = self._request(method="POST", path=url_path, json=data) except HTTPError as err: - logger.info(err.response.text) + logger.error(err.response.text) if response: # This should flatten the results, not return the first entry results = [] @@ -344,7 +398,7 @@ def create_payload(prefix: str, view: str) -> dict: "object": "ipv4address", "data": {"network_view": view, "network": prefix, "status": "USED"}, "args": { - "_return_fields": "ip_address,mac_address,names,network,objects,status,types,usage,comment,extattrs" + "_return_fields": "ip_address,mac_address,names,network,network_view,objects,status,types,usage,comment,extattrs" }, } return query @@ -378,11 +432,12 @@ def create_payload(prefix: str, view: str) -> dict: return ipaddrs - def create_network(self, prefix, comment=None): + def 
create_network(self, prefix, comment=None, network_view: Optional[str] = None): """Create a network. Args: prefix (str): IP network to create. + network_view (str): Name of the network view, e.g. 'dev' Returns: (str) of reference network @@ -391,16 +446,19 @@ def create_network(self, prefix, comment=None): "network/ZG5zLm5ldHdvcmskMTkyLjE2OC4wLjAvMjMvMA:192.168.0.0/23/default" """ params = {"network": prefix, "comment": comment} + if network_view: + params["network_view"] = network_view api_path = "network" response = self._request("POST", api_path, params=params) - logger.info(response.text) + logger.debug(response.text) return response.text - def delete_network(self, prefix): + def delete_network(self, prefix, network_view: Optional[str] = None): """Delete a network. Args: prefix (str): IPv4 prefix to delete. + network_view (str): Name of the network view, e.g. 'dev' Returns: (dict) deleted prefix. @@ -408,7 +466,7 @@ def delete_network(self, prefix): Returns Response: {"deleted": "network/ZG5zLm5ldHdvcmskMTkyLjAuMi4wLzI0LzA:192.0.2.0/24/default"} """ - resource = self._get_network_ref(prefix) + resource = self._get_network_ref(prefix=prefix, network_view=network_view) if resource: self._delete(resource) @@ -416,15 +474,16 @@ def delete_network(self, prefix): else: response = {"error": f"{prefix} not found."} - logger.info(response) + logger.debug(response) return response - def update_network(self, prefix, comment=None): + def update_network(self, prefix, comment=None, network_view: Optional[str] = None): """Update a network. Args: (str): IPv4 prefix to update. comment (str): IPv4 prefix update comment. + network_view (str): Name of the network view, e.g. 'dev' Returns: (dict) updated prefix. 
@@ -432,7 +491,7 @@ def update_network(self, prefix, comment=None): Return Response: {"updated": "network/ZG5zLm5ldHdvcmskMTkyLjE2OC4wLjAvMjMvMA:192.168.0.0/23/default"} """ - resource = self._get_network_ref(prefix) + resource = self._get_network_ref(prefix=prefix, network_view=network_view) if resource: params = {"network": prefix, "comment": comment} @@ -440,14 +499,15 @@ def update_network(self, prefix, comment=None): response = {"updated": resource} else: response = {"error": f"error updating {prefix}"} - logger.info(response) + logger.debug(response) return response - def create_network_container(self, prefix, comment=None): + def create_network_container(self, prefix, comment=None, network_view: Optional[str] = None): """Create a network container. Args: prefix (str): IP network to create. + network_view (str): Name of the network view, e.g. 'dev' Returns: (str) of reference network @@ -456,16 +516,19 @@ def create_network_container(self, prefix, comment=None): "networkcontainer/ZG5zLm5ldHdvcmskMTkyLjE2OC4wLjAvMjMvMA:192.168.0.0/23/default" """ params = {"network": prefix, "comment": comment} + if network_view: + params["network_view"] = network_view api_path = "networkcontainer" response = self._request("POST", api_path, params=params) - logger.info(response.text) + logger.debug(response.text) return response.text - def delete_network_container(self, prefix): + def delete_network_container(self, prefix, network_view: Optional[str] = None): """Delete a network container. Args: prefix (str): IPv4 prefix to delete. + network_view (str): Name of the network view, e.g. 'dev' Returns: (dict) deleted prefix. 
@@ -473,23 +536,25 @@ def delete_network_container(self, prefix): Returns Response: {"deleted": "networkcontainer/ZG5zLm5ldHdvcmskMTkyLjAuMi4wLzI0LzA:192.0.2.0/24/default"} """ - resource = self._get_network_container_ref(prefix) + resource = self._get_network_container_ref(prefix=prefix, network_view=network_view) if resource: self._delete(resource) response = {"deleted": resource} else: - response = {"error": f"{prefix} not found."} + nv_msg = f" in network view {network_view}" if network_view else "" + response = {"error": f"{prefix}{nv_msg} not found."} - logger.info(response) + logger.debug(response) return response - def update_network_container(self, prefix, comment=None): + def update_network_container(self, prefix, comment=None, network_view: Optional[str] = None): """Update a network container. Args: (str): IPv4 prefix to update. comment (str): IPv4 prefix update comment. + network_view (str): Name of the network view, e.g. 'dev' Returns: (dict) updated prefix. @@ -497,24 +562,26 @@ def update_network_container(self, prefix, comment=None): Return Response: {"updated": "networkcontainer/ZG5zLm5ldHdvcmskMTkyLjE2OC4wLjAvMjMvMA:192.168.0.0/23/default"} """ - resource = self._get_network_container_ref(prefix) + resource = self._get_network_container_ref(prefix=prefix, network_view=network_view) if resource: - params = {"network": prefix, "comment": comment} + params = {"comment": comment} self._update(resource, **params) response = {"updated": resource} else: - response = {"error": f"error updating {prefix}"} - logger.info(response) + nv_msg = f" in network view {network_view}" if network_view else "" + response = {"error": f"error updating {prefix}{nv_msg}"} + logger.debug(response) return response - def create_range(self, prefix: str, start: str, end: str) -> str: + def create_range(self, prefix: str, start: str, end: str, network_view: Optional[str] = None) -> str: """Create a range. Args: prefix: IP network range belongs to. 
start: The starting IP of the range. end: The ending IP of the range. + network_view (str): Name of the network view, e.g. 'dev' Returns: str: Object reference of range. @@ -523,19 +590,19 @@ def create_range(self, prefix: str, start: str, end: str) -> str: "range/ZG5zLm5ldHdvcmskMTkyLjE2OC4wLjAvMjMvMA:192.168.0.100/192.168.0.254/default" """ params = {"network": prefix, "start_addr": start, "end_addr": end} - plugin_defined_network_view = PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_NETWORK_VIEW") - if plugin_defined_network_view: - params["network_view"] = plugin_defined_network_view + if network_view: + params["network_view"] = network_view api_path = "range" response = self._request("POST", api_path, params=params) - logger.info(response.text) + logger.debug(response.text) return response.text - def get_host_record_by_name(self, fqdn): + def get_host_record_by_name(self, fqdn, network_view: Optional[str] = None): """Get the host record by using FQDN. Args: fqdn (str): IPv4 Address to look up + network_view (str): Name of the network view, e.g. 'dev' Returns: (list) of record dicts @@ -560,15 +627,23 @@ def get_host_record_by_name(self, fqdn): """ url_path = "record:host" params = {"name": fqdn, "_return_as_object": 1} + if network_view: + params["network_view"] = network_view response = self._request("GET", url_path, params=params) - logger.info(response.json) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - def get_host_record_by_ip(self, ip_address): + def get_host_record_by_ip(self, ip_address, network_view: Optional[str] = None): """Get the host record by using IP Address. Args: ip_address (str): IPv4 Address to look up + network_view (str): Name of the network view, e.g. 
'dev' Returns: (list) of record dicts @@ -593,15 +668,61 @@ def get_host_record_by_ip(self, ip_address): """ url_path = "record:host" params = {"ipv4addr": ip_address, "_return_as_object": 1} + if network_view: + params["network_view"] = network_view response = self._request("GET", url_path, params=params) - logger.info(response.json) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def get_host_record_by_ref(self, ref: str): + """Get the Host record by ref. + + Args: + ref (str): reference to the Host record + + Returns: + (dict) Host record + + Return Response: + { + "_ref": "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LnRlc3QudGVzdGRldmljZTE:testdevice1.test/default", + "ipv4addrs": [ + { + "_ref": "record:host_ipv4addr/ZG5zLmhvc3RfYWRkcmVzcyQuX2RlZmF1bHQudGVzdC50ZXN0ZGV2aWNlMS4xMC4yMjAuMC4xMDEu:10.220.0.101/testdevice1.test/default", + "configure_for_dhcp": true, + "host": "testdevice1.test", + "ipv4addr": "10.220.0.101", + "mac": "11:11:11:11:11:11" + } + ], + "name": "testdevice1.test", + "view": "default" + } + """ + url_path = f"{ref}" + params = { + "_return_fields": "name,view,ipv4addrs,comment", + } + response = self._request("GET", path=url_path, params=params) + logger.error(response.text) + try: + logger.debug(response.json()) + return response.json() + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - def get_a_record_by_name(self, fqdn): + def get_a_record_by_name(self, fqdn, network_view: Optional[str] = None): """Get the A record for a FQDN. Args: fqdn (str): "testdevice1.test" + network_view (str): Name of the network view, e.g. 
'dev' Returns: (list) of record dicts @@ -618,15 +739,24 @@ def get_a_record_by_name(self, fqdn): """ url_path = "record:a" params = {"name": fqdn, "_return_as_object": 1} + if network_view: + dns_view = self.get_dns_view_for_network_view(network_view) + params["view"] = dns_view response = self._request("GET", url_path, params=params) - logger.info(response.json) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - def get_a_record_by_ip(self, ip_address): + def get_a_record_by_ip(self, ip_address, network_view: Optional[str] = None): """Get the A record for a IP Address. Args: ip_address (str): "10.220.0.101" + network_view (str): Name of the network view, e.g. 'dev' Returns: (list) of record dicts @@ -642,16 +772,162 @@ def get_a_record_by_ip(self, ip_address): ] """ url_path = "record:a" - params = {"ipv4addr": ip_address, "_return_as_object": 1} + params = { + "ipv4addr": ip_address, + "_return_as_object": 1, + "_return_fields": "name,view,ipv4addr,comment", + } + if network_view: + dns_view = self.get_dns_view_for_network_view(network_view) + params["view"] = dns_view + response = self._request("GET", url_path, params=params) + try: + logger.debug(response.json()) + results = response.json().get("result") + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + if results: + return results[0] + return None + + def get_a_record_by_ref(self, ref: str): + """Get the A record by ref. 
+ + Args: + ref (str): reference to the A record + + Returns: + (dict) A record + + Return Response: + [ + { + "_ref": "record:a/ZG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdCx0ZXN0ZGV2aWNlMSwxMC4yMjAuMC4xMDE:testdevice1.test/default", + "ipv4addr": "10.220.0.101", + "name": "testdevice1.test", + "view": "default" + } + ] + """ + url_path = f"{ref}" + params = { + "_return_fields": "name,view,ipv4addr,comment,extattrs", + } + response = self._request("GET", path=url_path, params=params) + logger.error(response.text) + try: + logger.debug(response.json()) + return response.json() + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def delete_a_record_by_ref(self, ref): + """Delete DNS A record by ref. + + Args: + ref (str): reference to the DNS A record + + Returns: + (dict) deleted DNS A record. + + Returns Response: + {"deleted": "record:a/ZG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdCx0ZXN0ZGV2aWNlMSwxMC4yMjAuMC4xMDE:testdevice1.test/default"} + """ + self._delete(ref) + response = {"deleted": ref} + + logger.debug(response) + return response + + def get_ptr_record_by_ref(self, ref: str): + """Get the PTR record by FQDN. + + Args: + ref (str): Reference to PTR record + + Returns: + (dict) PTR Record + + Return Response: + [ + { + "_ref": "record:ptr/ZG5zLmJpbmRfcHRyJC5fZGVmYXVsdC50ZXN0LjEwMS4wLjIyMC4xMC50ZXN0ZGV2aWNlMS50ZXN0:10.220.0.101.test/default", + "ptrdname": "testdevice1.test", + "view": "default" + } + ] + """ + url_path = f"{ref}" + params = { + "_return_fields": "name,ptrdname,ipv4addr,ipv6addr,view,comment", + } + response = self._request("GET", path=url_path, params=params) + logger.error(response.text) + try: + logger.debug(response.json()) + return response.json() + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def get_ptr_record_by_ip( + self, ip_address, network_view: Optional[str] = None + ): # pylint: disable=inconsistent-return-statements + """Get the PTR record by FQDN. 
+ + Args: + ip_address (str): "record:ptr/ZG5zLmJpbmRfcHRyJC5fZGVmYXVsdC50ZXN0LjEwMS4wLjIyMC4xMC50ZXN0ZGV2aWNlMS50ZXN0:10.220.0.101.test/default" + network_view (str): Name of the network view, e.g. 'dev' + + Returns: + (dict) PTR Record + + Return Response: + { + "result": [ + { + "_ref": "record:ptr/ZG5zLmJpbmRfcHRyJC4yLmFycGEuaW4tYWRkci4xMC4wLjAuMS5ob3N0MS5uYXV0b2JvdC5sb2NhbC50ZXN0:1.0.0.10.in-addr.arpa/default.dev", + "extattrs": { + + }, + "ipv4addr": "10.0.0.1", + "ipv6addr": "", + "name": "1.0.0.10.in-addr.arpa", + "ptrdname": "host1.nautobot.local.test", + "view": "default.dev", + "zone": "in-addr.arpa" + } + ] + } + """ + url_path = "record:ptr" + params = { + "ipv4addr": ip_address, + "_return_as_object": 1, + "_return_fields": "ipv4addr,ipv6addr,name,view,extattrs,comment,zone,ptrdname", + } + if network_view: + dns_view = self.get_dns_view_for_network_view(network_view) + params["view"] = dns_view response = self._request("GET", url_path, params=params) - logger.info(response.json) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + if results: + return results[0] + return None - def get_ptr_record_by_name(self, fqdn): + def get_ptr_record_by_name(self, fqdn, network_view: Optional[str] = None): """Get the PTR record by FQDN. Args: fqdn (str): "testdevice1.test" + network_view (str): Name of the network view, e.g. 
'dev' Returns: (list) of record dicts @@ -667,9 +943,35 @@ def get_ptr_record_by_name(self, fqdn): """ url_path = "record:ptr" params = {"ptrdname": fqdn, "_return_as_object": 1} + if network_view: + dns_view = self.get_dns_view_for_network_view(network_view) + params["view"] = dns_view response = self._request("GET", url_path, params=params) - logger.info(response.json) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def delete_ptr_record_by_ref(self, ref): + """Delete DNS PTR record by ref. + + Args: + ref (str): reference to the DNS PTR record + + Returns: + (dict) deleted DNS PTR record. + + Returns Response: + {"deleted": "record:ptr/ZG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdCx0ZXN0ZGV2aWNlMSwxMC4yMjAuMC4xMDE:testdevice1.test/default"} + """ + self._delete(ref) + response = {"deleted": ref} + + logger.debug(response) + return response def get_all_dns_views(self): """Get all dns views. @@ -692,16 +994,21 @@ def get_all_dns_views(self): ] """ url_path = "view" - params = {"_return_as_object": 1} + params = {"_return_fields": "is_default,name,network_view", "_return_as_object": 1} response = self._request("GET", url_path, params=params) - logger.info(response.json) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - def create_a_record(self, fqdn, ip_address): + def create_a_record(self, fqdn, ip_address, comment: Optional[str] = None, network_view: Optional[str] = None): """Create an A record for a given FQDN. - Please note: This API call with work only for host records that do not have an associated a record. - If an a record already exists, this will return a 400 error. 
+ Args: + network_view (str): Name of the network view, e.g. 'dev' Returns: Dict: Dictionary of _ref and name @@ -715,9 +1022,19 @@ def create_a_record(self, fqdn, ip_address): url_path = "record:a" params = {"_return_fields": "name", "_return_as_object": 1} payload = {"name": fqdn, "ipv4addr": ip_address} + if network_view: + dns_view = self.get_dns_view_for_network_view(network_view) + payload["view"] = dns_view + if comment: + payload["comment"] = comment response = self._request("POST", url_path, params=params, json=payload) - logger.info(response.json) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text def get_dhcp_lease(self, lease_to_check): """Get a DHCP lease for the IP/hostname passed in. @@ -741,11 +1058,12 @@ def get_dhcp_lease(self, lease_to_check): return self.get_dhcp_lease_from_ipv4(lease_to_check) return self.get_dhcp_lease_from_hostname(lease_to_check) - def get_dhcp_lease_from_ipv4(self, ip_address): + def get_dhcp_lease_from_ipv4(self, ip_address, network_view: Optional[str] = None): """Get a DHCP lease for the IP address passed in. Args: ip_address (str): "192.168.0.1" + network_view (str): Name of the network view, e.g. 
'dev' Returns: (list) of record dicts @@ -766,15 +1084,23 @@ def get_dhcp_lease_from_ipv4(self, ip_address): "_return_fields": "binding_state,hardware,client_hostname,fingerprint", "_return_as_object": 1, } + if network_view: + params["network_view"] = network_view response = self._request("GET", url_path, params=params) - logger.info(response.json) - return response.json() + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - def get_dhcp_lease_from_hostname(self, hostname): + def get_dhcp_lease_from_hostname(self, hostname, network_view: Optional[str] = None): """Get a DHCP lease for the hostname passed in. Args: hostnames (str): "testdevice1.test" + network_view (str): Name of the network view, e.g. 'dev' Returns: (list) of record dicts @@ -795,15 +1121,25 @@ def get_dhcp_lease_from_hostname(self, hostname): "_return_fields": "binding_state,hardware,client_hostname,fingerprint", "_return_as_object": 1, } + if network_view: + params["network_view"] = network_view response = self._request("GET", url_path, params=params) - logger.info(response.json) - return response.json() + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - def get_all_ranges(self, prefix: Optional[str] = None) -> dict[str, dict[str, list[dict[str, str]]]]: + def get_all_ranges( + self, prefix: Optional[str] = None, network_view: Optional[str] = None + ) -> dict[str, dict[str, list[dict[str, str]]]]: """Get all Ranges. Args: prefix: Network prefix - '10.220.0.0/22' + network_view (str): Name of the network view, e.g. 'dev' Returns: dict: The mapping of network_view to prefix to defined ranges. 
@@ -821,30 +1157,35 @@ def get_all_ranges(self, prefix: Optional[str] = None) -> dict[str, dict[str, li """ url_path = "range" params = {"_return_fields": "network,network_view,start_addr,end_addr", "_max_results": 10000} - plugin_defined_network_view = PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_NETWORK_VIEW") - if plugin_defined_network_view: - params["network_view"] = plugin_defined_network_view + if network_view: + params["network_view"] = network_view if prefix: - params["network"]: prefix + params["network"] = prefix try: response = self._request("GET", url_path, params=params) except HTTPError as err: - logger.info(err.response.text) + logger.error(err.response.text) return {} - json_response = response.json() - logger.info(json_response) + try: + json_response = response.json() + logger.debug(json_response) + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + data = defaultdict(lambda: defaultdict(list)) for prefix_range in json_response: str_range = f"{prefix_range['start_addr']}-{prefix_range['end_addr']}" data[prefix_range["network_view"]][prefix_range["network"]].append(str_range) return data - def get_all_subnets(self, prefix: str = None, ipv6: bool = False): + def get_all_subnets(self, prefix: str = None, ipv6: bool = False, network_view: Optional[str] = None): """Get all Subnets. Args: prefix (str): Network prefix - '10.220.0.0/22' ipv6 (bool): Whether or not the call should be made for IPv6 subnets. + network_view (str): Name of the network view, e.g. 
'dev' Returns: (list) of record dicts @@ -879,19 +1220,25 @@ def get_all_subnets(self, prefix: str = None, ipv6: bool = False): "_return_fields": "network,network_view,comment,extattrs,rir_organization,rir,vlans", "_max_results": 10000, } - if PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_NETWORK_VIEW"): - params.update({"network_view": PLUGIN_CFG["NAUTOBOT_INFOBLOX_NETWORK_VIEW"]}) + if network_view: + params.update({"network_view": network_view}) if prefix: params.update({"network": prefix}) try: response = self._request("GET", url_path, params=params) except HTTPError as err: - logger.info(err.response.text) + logger.error(err.response.text) return [] - json_response = response.json() - logger.info(json_response) + try: + logger.debug(response.json()) + json_response = response.json() + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + # In-place update json_response containing prefixes with DHCP ranges, if found. + # This should be an opt-in if not ipv6: - ranges = self.get_all_ranges(prefix=prefix) + ranges = self.get_all_ranges(prefix=prefix, network_view=network_view) for returned_prefix in json_response: network_view_ranges = ranges.get(returned_prefix["network_view"], {}) prefix_ranges = network_view_ranges.get(returned_prefix["network"]) @@ -901,8 +1248,11 @@ def get_all_subnets(self, prefix: str = None, ipv6: bool = False): logger.info("Support for DHCP Ranges is not currently supported for IPv6 Networks.") return json_response - def get_authoritative_zone(self): - """Get authoritative zone to check if fqdn exists. + def get_authoritative_zone(self, network_view: Optional[str] = None): + """Get authoritative zones. + + Args: + network_view (str): Name of the network view, e.g. 
'dev' Returns: (list) of zone dicts @@ -923,13 +1273,62 @@ def get_authoritative_zone(self): """ url_path = "zone_auth" params = {"_return_as_object": 1} + if network_view: + dns_view = self.get_dns_view_for_network_view(network_view) + params["view"] = dns_view response = self._request("GET", url_path, params=params) - logger.info(response.json()) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + @lru_cache(maxsize=1024) + def get_authoritative_zones_for_dns_view(self, view: str): + """Get authoritative zone list for given DNS view. - def _find_network_reference(self, network): + Returns: + (list) of zone dicts + view (str): Name of the DNS view, e.g. 'default.dev' + + Return Response: + [ + { + "_ref": "zone_auth/ZG5zLnpvbmUkLl9kZWZhdWx0LnRlc3Qtc2l0ZS1pbm5hdXRvYm90:test-site-innautobot/default", + "fqdn": "test-site-innautobot", + "view": "default" + }, + { + "_ref": "zone_auth/ZG5zLnpvbmUkLl9kZWZhdWx0LnRlc3Qtc2l0ZQ:test-site/default", + "fqdn": "test-site", + "view": "default" + }, + ] + """ + url_path = "zone_auth" + params = { + "view": view, + "zone_format": "FORWARD", + "_return_fields": "fqdn,view", + "_return_as_object": 1, + } + response = self._request("GET", path=url_path, params=params) + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def _find_network_reference(self, network, network_view: Optional[str] = None): """Find the reference for the given network. + Args: + network_view (str): Name of the network view, e.g. 
'dev' + Returns: Dict: Dictionary of _ref and name @@ -944,13 +1343,23 @@ def _find_network_reference(self, network): """ url_path = "network" params = {"network": network} + if network_view: + params["network_view"] = network_view response = self._request("GET", url_path, params=params) - logger.info(response.json()) - return response.json() + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - def find_next_available_ip(self, network): + def find_next_available_ip(self, network, network_view: Optional[str] = None): """Find the next available ip address for a given network. + Args: + network_view (str): Name of the network view, e.g. 'dev' + Returns: Dict: @@ -964,9 +1373,13 @@ def find_next_available_ip(self, network): next_ip_avail = "" # Find the Network reference id try: - network_ref_id = self._find_network_reference(network) + network_ref_id = self._find_network_reference(network=network, network_view=network_view) except Exception as err: # pylint: disable=broad-except - logger.warning("Network reference not found for %s: %s", network, err) + if network_view: + err_msg = f"Network reference not found for {network}-{network_view}: {str(err)}" + else: + err_msg = f"Network reference not found for {network}: {str(err)}" + logger.warning(err_msg) return next_ip_avail if network_ref_id and isinstance(network_ref_id, list): @@ -975,14 +1388,69 @@ def find_next_available_ip(self, network): params = {"_function": "next_available_ip"} payload = {"num": 1} response = self._request("POST", url_path, params=params, json=payload) - logger.info(response.json()) + logger.debug(response.json()) next_ip_avail = response.json().get("ips")[0] return next_ip_avail - def reserve_fixed_address(self, network, mac_address): + def get_fixed_address_by_ref(self, ref: str): + """Get the Fixed Address object by ref. 
+ + Args: + ref (str): reference to the Fixed Address object + + Returns: + (dict) Fixed Address object + + Return Response: + { + "_ref": "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.2/dev", + "extattrs": { + + }, + "mac": "52:1f:83:d4:9a:2e", + "name": "host-fixed1", + "network": "10.0.0.0/24", + "network_view": "dev" + } + """ + url_path = f"{ref}" + params = { + "_return_fields": "mac,network,network_view,comment,extattrs,name", + } + response = self._request("GET", path=url_path, params=params) + logger.error(response.text) + try: + logger.debug(response.json()) + return response.json() + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def delete_fixed_address_record_by_ref(self, ref): + """Delete Fixed Address record by ref. + + Args: + ref (str): reference to the fixed address record + + Returns: + (dict) deleted fixed address record. + + Returns Response: + {"deleted": "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.2/dev"} + """ + self._delete(ref) + response = {"deleted": ref} + + logger.debug(response) + return response + + def reserve_fixed_address(self, network, mac_address, network_view: Optional[str] = None): """Reserve the next available ip address for a given network range. + Args: + network_view (str): Name of the network view, e.g. 
'dev' + Returns: Str: The IP Address that was reserved @@ -990,19 +1458,38 @@ def reserve_fixed_address(self, network, mac_address): "10.220.0.1" """ # Get the next available IP Address for this network - ip_address = self.find_next_available_ip(network) + ip_address = self.find_next_available_ip(network=network, network_view=network_view) if ip_address: url_path = "fixedaddress" params = {"_return_fields": "ipv4addr", "_return_as_object": 1} payload = {"ipv4addr": ip_address, "mac": mac_address} + if network_view: + payload["network_view"] = network_view response = self._request("POST", url_path, params=params, json=payload) - logger.info(response.json()) - return response.json().get("result").get("ipv4addr") + try: + logger.debug(response.json()) + results = response.json().get("result").get("ipv4addr") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text return False - def create_fixed_address(self, ip_address, mac_address): + def create_fixed_address( # pylint: disable=too-many-arguments + self, + ip_address, + name: str = None, + mac_address: Optional[str] = None, + comment: Optional[str] = None, + match_client: str = "MAC_ADDRESS", + network_view: Optional[str] = None, + ): """Create a fixed ip address within Infoblox. + Args: + network_view (str): Name of the network view, e.g. 
'dev' + match_client: match client value, valid values are: "MAC_ADDRESS", "RESERVED" + Returns: Str: The IP Address that was reserved @@ -1011,16 +1498,61 @@ def create_fixed_address(self, ip_address, mac_address): """ url_path = "fixedaddress" params = {"_return_fields": "ipv4addr", "_return_as_object": 1} - payload = {"ipv4addr": ip_address, "mac": mac_address} + valid_match_client_choices = ["MAC_ADDRESS", "RESERVED"] + if match_client not in valid_match_client_choices: + return None + payload = {"ipv4addr": ip_address, "match_client": match_client} + if match_client == "MAC_ADDRESS" and mac_address: + payload["mac"] = mac_address + if network_view: + payload["network_view"] = network_view + if name: + payload["name"] = name + if comment: + payload["comment"] = comment response = self._request("POST", url_path, params=params, json=payload) - logger.info(response.json()) - return response.json().get("result").get("ipv4addr") + try: + logger.debug(response.json()) + results = response.json().get("result").get("ipv4addr") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def update_fixed_address(self, ref, data): + """Update a fixed ip address within Infoblox. 
+ + Args: + ref (str): Reference to fixed address record - def create_host_record(self, fqdn, ip_address): + Returns: + Dict: Dictionary of _ref and name + + Return Response: + { + "_ref": "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMjIwLjAuMy4wLi4:10.220.0.3/default", + "ipv4addr": "10.220.0.3" + } + """ + params = {} + try: + response = self._request("PUT", path=ref, params=params, json=data) + except HTTPError as err: + logger.error("Could not update fixed address: %s for ref %s", err.response.text, ref) + return None + try: + logger.debug("Infoblox fixed address record updated: %s", response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def create_host_record(self, fqdn, ip_address, comment: Optional[str] = None, network_view: Optional[str] = None): """Create a host record for a given FQDN. - Please note: This API call with work only for host records that do not have an associated a record. - If an a record already exists, this will return a 400 error. + Args: + network_view (str): Name of the network view, e.g. 
'dev' Returns: Dict: Dictionary of _ref and name @@ -1035,32 +1567,94 @@ def create_host_record(self, fqdn, ip_address): url_path = "record:host" params = {"_return_fields": "name", "_return_as_object": 1} payload = {"name": fqdn, "configure_for_dns": False, "ipv4addrs": [{"ipv4addr": ip_address}]} + if network_view: + payload["network_view"] = network_view + if comment: + payload["comment"] = comment try: response = self._request("POST", url_path, params=params, json=payload) except HTTPError as err: - logger.info("Host record error: %s", err.response.text) + logger.error("Host record error: %s", err.response.text) return [] - logger.info("Infoblox host record created: %s", response.json()) - return response.json().get("result") + try: + logger.debug("Infoblox host record created: %s", response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def update_host_record(self, ref, data): + """Update a host record for a given FQDN. 
+ + Args: + ref (str): Reference to Host record + + Returns: + Dict: Dictionary of _ref and name + + Return Response: + { - def delete_host_record(self, ip_address): - """Delete provided IP Address from Infoblox.""" - resource = self.get_host_record_by_ip(ip_address) + "_ref": "record:host/ZG5zLmhvc3QkLjEuY29tLmluZm9ibG94Lmhvc3Q:host.infoblox.com/default.test", + "name": "host.infoblox.com", + } + """ + params = {} + try: + response = self._request("PUT", path=ref, params=params, json=data) + except HTTPError as err: + logger.error("Could not update Host address: %s for ref %s", err.response.text, ref) + return None + try: + logger.debug("Infoblox host record updated: %s", response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def delete_host_record(self, ip_address, network_view: Optional[str] = None): + """Delete host record for provided IP Address from Infoblox. + + Args: + network_view (str): Name of the network view, e.g. 'dev' + """ + resource = self.get_host_record_by_ip(ip_address=ip_address, network_view=network_view) if resource: ref = resource[0]["_ref"] self._delete(ref) - response = {"deleted": ip_address} + response = {"deleted": ip_address, "network_view": network_view} else: - response = {"error": f"Did not find {ip_address}"} - logger.info(response) + response = {"error": f"Did not find IP address {ip_address} in network view {network_view}"} + logger.debug(response) return response - def create_ptr_record(self, fqdn, ip_address): - """Create an PTR record for a given FQDN. + def delete_host_record_by_ref(self, ref): + """Delete DNS Host record by ref. + + Args: + ref (str): reference to the DNS Host record + + Returns: + (dict) deleted DNS Host record. 
+ + Returns Response: + {"deleted": "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LnRlc3QudGVzdGRldmljZTE:testdevice1.test/default"} + """ + self._delete(ref) + response = {"deleted": ref} + + logger.debug(response) + return response + + def create_ptr_record(self, fqdn, ip_address, comment: Optional[str] = None, network_view: Optional[str] = None): + """Create a PTR record for a given FQDN. Args: fqdn (str): Fully Qualified Domain Name ip_address (str): Host IP address + network_view (str): Name of the network view, e.g. 'dev' Returns: Dict: Dictionary of _ref and name @@ -1074,14 +1668,24 @@ def create_ptr_record(self, fqdn, ip_address): } """ url_path = "record:ptr" - params = {"_return_fields": "name,ptrdname,ipv4addr", "_return_as_object": 1} + params = {"_return_fields": "name,ptrdname,ipv4addr,view", "_return_as_object": 1} reverse_host = str(reversename.from_address(ip_address))[ 0:-1 ] # infoblox does not accept the top most domain '.', so we strip it payload = {"name": reverse_host, "ptrdname": fqdn, "ipv4addr": ip_address} + if network_view: + dns_view = self.get_dns_view_for_network_view(network_view) + payload["view"] = dns_view + if comment: + payload["comment"] = comment response = self._request("POST", url_path, params=params, json=payload) - logger.info("Infoblox PTR record created: %s", response.json()) - return response.json().get("result") + try: + logger.debug("Infoblox PTR record created: %s", response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text def search_ipv4_address(self, ip_address): """Find if IP address is in IPAM. Returns empty list if address does not exist. 
@@ -1114,8 +1718,13 @@ def search_ipv4_address(self, ip_address): url_path = "search" params = {"address": ip_address, "_return_as_object": 1} response = self._request("GET", url_path, params=params) - logger.info(response.json()) - return response.json().get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text def get_vlan_view(self, name="Nautobot"): """Retrieve a specific vlanview. @@ -1139,8 +1748,13 @@ def get_vlan_view(self, name="Nautobot"): url_path = "vlanview" params = {"name": name} response = self._request("GET", path=url_path, params=params) - logger.info(response.json()) - return response.json() + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text def create_vlan_view(self, name, start_vid=1, end_vid=4094): """Create a vlan view. @@ -1159,8 +1773,13 @@ def create_vlan_view(self, name, start_vid=1, end_vid=4094): url_path = "vlanview" params = {"name": name, "start_vlan_id": start_vid, "end_vlan_id": end_vid} response = self._request("POST", path=url_path, params=params) - logger.info(response.json()) - return response.json() + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text def get_vlanviews(self): """Retrieve all VLANViews from Infoblox. 
@@ -1189,8 +1808,13 @@ def get_vlanviews(self): url_path = "vlanview" params = {"_return_fields": "name,comment,start_vlan_id,end_vlan_id,extattrs"} response = self._request("GET", url_path, params=params) - logger.info(response.json()) - return response.json() + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text def get_vlans(self): """Retrieve all VLANs from Infoblox. @@ -1238,8 +1862,16 @@ def get_vlans(self): ] ) response = self._request("POST", url_path, data=payload) - logger.info(response.json()[0]) - return response.json()[0] + try: + logger.debug(response.json()) + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + if len(response.json()): + return response.json()[0] + + return [] def create_vlan(self, vlan_id, vlan_name, vlan_view): """Create a VLAN in Infoblox. @@ -1266,8 +1898,13 @@ def create_vlan(self, vlan_id, vlan_name, vlan_view): params = {} payload = {"parent": parent, "id": vlan_id, "name": vlan_name} response = self._request("POST", url_path, params=params, json=payload) - logger.info(response.json()) - return response.json() + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text @staticmethod def get_ipaddr_status(ip_record: dict) -> str: @@ -1285,7 +1922,7 @@ def get_ipaddr_type(ip_record: dict) -> str: return "slaac" return "host" - def _find_resource(self, resource, **params): + def _find_matching_resources(self, resource, **params): """Find the resource for given parameters. 
Returns: @@ -1295,76 +1932,168 @@ def _find_resource(self, resource, **params): _ref: fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMjIwLjAuMy4wLi4:10.220.0.3/default """ response = self._request("GET", resource, params=params) - logger.info(response.json()) - for _resource in response.json(): - return _resource.get("_ref") - return response.json() + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - # TODO: See if we should accept params dictionary and extended to both host record and fixed address - def update_ipaddress(self, ip_address, **data): # pylint: disable=inconsistent-return-statements - """Update a Network object with a given prefix. + def update_ptr_record(self, ref, data): # pylint: disable=inconsistent-return-statements + """Update a PTR Record. Args: - prefix (str): Valid IP prefix + ref (str): Reference to PTR record data (dict): keyword args used to update the object e.g. comment="updateme" Returns: Dict: Dictionary of _ref and name + Return Response: + { + "_ref": "record:ptr/ZG5zLmJpbmRfcHRyJC5fZGVmYXVsdC5hcnBhLmluLWFkZHIuMTAuMjIzLjkuOTYucjQudGVzdA:96.9.223.10.in-addr.arpa/default", + "ipv4addr": "10.223.9.96", + "name": "96.9.223.10.in-addr.arpa", + "ptrdname": "r4.test" + } + """ + params = {} + try: + logger.debug(data) + response = self._request("PUT", path=ref, params=params, json=data) + except HTTPError as err: + logger.error("Could not update DNS PTR record: %s for ref %s", err.response.text, ref) + return None + try: + logger.debug("Infoblox DNS PTR record updated: %s", response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def update_a_record(self, ref, data): # pylint: disable=inconsistent-return-statements + """Update an A record. 
+ + Args: + ref (str): Reference to A record + data (dict): keyword args used to update the object e.g. comment="updateme" + + Returns: + Dict: Dictionary of _ref and name + + Return Response: + { + "_ref": "record:a/ZG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdCx0ZXN0ZGV2aWNlMSwxMC4yMjAuMC4xMDE:testdevice1.test/default", + "ipv4addr": "10.220.0.101", + "name": "testdevice1.test", + "view": "default" + } + """ + params = {} + try: + logger.debug(data) + response = self._request("PUT", path=ref, params=params, json=data) + except HTTPError as err: + logger.error("Could not update DNS A record: %s for ref %s", err.response.text, ref) + return None + try: + logger.debug("Infoblox DNS A record updated: %s", response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def update_ipaddress( + self, + ip_address, + data, + network_view: Optional[str] = None, + ): # pylint: disable=inconsistent-return-statements + """Update a IP Address object with a given ip address. + + Args: + ip_address (str): Valid IP address + data (dict): keyword args used to update the object e.g. comment="updateme" + network_view (str): Name of the network view, e.g. 'dev' + + Returns: + Dict: Dictionary of _ref and name + Return Response: { "_ref": "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMjIwLjAuMy4wLi4:10.220.0.3/default", "ipv4addr": "10.220.0.3" } """ - resource = self._find_resource("search", address=ip_address) - if not resource: - return - # params = {"_return_fields": "ipv4addr", "_return_as_object": 1} + resources = self._find_matching_resources("search", address=ip_address) + if not resources: + return None + ipv4_ref = None + # We can get multiple resources of varying types. 
The name of resource is embedded in the `_ref` attr + resource_types = ["fixedaddress"] + for resource in resources: + ref = resource.get("_ref") + if ref.split("/")[0] not in resource_types: + continue + if network_view and resource.get("network_view") != network_view: + continue + if resource.get("ipv4addr") != ip_address: + continue + ipv4_ref = ref + break + + if not ipv4_ref: + return None params = {} try: - logger.info(data) - response = self._request("PUT", path=resource, params=params, json=data["data"]) + logger.debug(data) + response = self._request("PUT", path=ipv4_ref, params=params, json=data) except HTTPError as err: - logger.info("Resource: %s", resource) - logger.info("Could not update IP address: %s", err.response.text) - return - logger.info("Infoblox IP Address updated: %s", response.json()) - return response.json() + logger.error("Could not update IP address: %s for ref %s", err.response.text, ipv4_ref) + return None + try: + logger.debug("Infoblox IP Address updated: %s", response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text - def get_tree_from_container(self, root_container: str) -> list: + def get_tree_from_container(self, root_container: str, network_view: Optional[str] = None) -> list: """Returns the list of all child containers from a given root container.""" flattened_tree = [] stack = [] root_containers = self.get_network_containers(prefix=root_container) + if network_view: + root_containers = self.get_network_containers(prefix=root_container, network_view=network_view) + else: + root_containers = self.get_network_containers(prefix=root_container) if root_containers: stack = [root_containers[0]] + get_child_network_containers_kwargs = {} + if network_view: + get_child_network_containers_kwargs["network_view"] = network_view + while stack: current_node = stack.pop() + get_child_network_containers_kwargs.update({"prefix": 
current_node["network"]}) flattened_tree.append(current_node) - children = self.get_child_network_containers(prefix=current_node["network"]) + children = self.get_child_network_containers(**get_child_network_containers_kwargs) stack.extend(children) return flattened_tree - def remove_duplicates(self, network_list: list) -> list: - """Removes duplicate networks from a list of networks.""" - seen_networks = set() - new_list = [] - for network in network_list: - if network["network"] not in seen_networks: - new_list.append(network) - seen_networks.add(network["network"]) - - return new_list - - def get_network_containers(self, prefix: str = "", ipv6: bool = False): + def get_network_containers(self, prefix: str = "", ipv6: bool = False, network_view: Optional[str] = None): """Get all Network Containers. Args: prefix (str): Specific prefix (192.168.0.1/24) ipv6 (bool): Whether the call should be made for IPv6 network containers. + network_view (str): Name of the network view, e.g. 'dev' Returns: (list) of record dicts @@ -1391,21 +2120,27 @@ def get_network_containers(self, prefix: str = "", ipv6: bool = False): "_return_fields": "network,comment,network_view,extattrs,rir_organization,rir", "_max_results": 100000, } - if PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_NETWORK_VIEW"): - params.update({"network_view": PLUGIN_CFG["NAUTOBOT_INFOBLOX_NETWORK_VIEW"]}) + if network_view: + params.update({"network_view": network_view}) if prefix: params.update({"network": prefix}) response = self._request("GET", url_path, params=params) - response = response.json() - logger.info(response) - results = response.get("result", []) + try: + logger.debug(response.json()) + results = response.json().get("result", []) + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text for res in results: res.update({"status": "container"}) return results - def get_child_network_containers(self, prefix: str): + def get_child_network_containers(self, prefix: str, network_view: 
Optional[str] = None): """Get all Child Network Containers for Container. + Args: + network_view (str): Name of the network view, e.g. 'dev' + Returns: (list) of record dicts @@ -1435,22 +2170,26 @@ def get_child_network_containers(self, prefix: str): "_return_fields": "network,comment,network_view,extattrs,rir_organization,rir", "_max_results": 100000, } - if PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_NETWORK_VIEW"): - params.update({"network_view": PLUGIN_CFG["NAUTOBOT_INFOBLOX_NETWORK_VIEW"]}) + if network_view: + params.update({"network_view": network_view}) params.update({"network_container": prefix}) response = self._request("GET", url_path, params=params) - response = response.json() - logger.info(response) - results = response.get("result", []) + try: + logger.debug(response.json()) + results = response.json().get("result", []) + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text for res in results: res.update({"status": "container"}) return results - def get_child_subnets_from_container(self, prefix: str): + def get_child_subnets_from_container(self, prefix: str, network_view: Optional[str] = None): """Get child subnets from container. Args: prefix (str): Network prefix - '10.220.0.0/22' + network_view (str): Name of the network view, e.g. 
'dev' Returns: (list) of record dicts @@ -1483,15 +2222,161 @@ def get_child_subnets_from_container(self, prefix: str): "_return_fields": "network,network_view,comment,extattrs,rir_organization,rir,vlans", "_max_results": 10000, } - if PLUGIN_CFG.get("NAUTOBOT_INFOBLOX_NETWORK_VIEW"): - params.update({"network_view": PLUGIN_CFG["NAUTOBOT_INFOBLOX_NETWORK_VIEW"]}) + if network_view: + params.update({"network_view": network_view}) params.update({"network_container": prefix}) try: response = self._request("GET", url_path, params=params) except HTTPError as err: - logger.info(err.response.text) + logger.error(err.response.text) return [] - response = response.json() - logger.info(response) - return response.get("result") + try: + logger.debug(response.json()) + results = response.json().get("result") + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def get_network_views(self): + """Get all network views. + + Returns: + (list) of record dicts + + Return Response: + [ + { + "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true", + "associated_dns_views": [ + "default" + ], + "extattrs": { + + }, + "is_default": true, + "name": "default" + }, + { + "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQx:prod/false", + "associated_dns_views": [ + "default.prod" + ], + "extattrs": { + + }, + "is_default": false, + "name": "prod" + }, + { + "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQy:dev/false", + "associated_dns_views": [ + "default.dev" + ], + "extattrs": { + + }, + "is_default": false, + "name": "dev" + } + ] + """ + url_path = "networkview" + params = { + "_return_fields": "name,associated_dns_views,extattrs,comment,is_default", + } + try: + response = self._request("GET", url_path, params=params) + except HTTPError as err: + logger.error(err.response.text) + return [] + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + 
logger.error(response.text) + return response.text + + def get_network_view(self, name: str): + """Get network view object for given name. + + Args: + name (str): Name of the network view - 'dev' + + Returns: + (dict) record dict + + Return Response: + [ + { + "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQy:dev/false", + "associated_dns_views": [ + "default.dev" + ], + "extattrs": { + + }, + "is_default": false, + "name": "dev" + } + ] + """ + url_path = "networkview" + params = { + "name": name, + "_return_fields": "name,associated_dns_views,extattrs,comment,is_default", + } + try: + response = self._request("GET", path=url_path, params=params) + except HTTPError as err: + logger.error(err.response.text) + return [] + try: + logger.debug(response.json()) + results = response.json() + return results + except json.decoder.JSONDecodeError: + logger.error(response.text) + return response.text + + def get_dns_view_for_network_view(self, network_view: str): + """Get DNS view for given network view. + + Use DNS view defined in the Infoblox Config. If the mapping is not defined retrieve the default DNS View. + + Args: + network_view (str): Name of the network view - 'dev' + + Returns: + (str) name of the DNS view + """ + if network_view in self.network_view_to_dns_map: + return self.network_view_to_dns_map[network_view] + + dns_view = self.get_default_dns_view_for_network_view(network_view) + # Cache the value to avoid excessive API queries + if dns_view: + self.network_view_to_dns_map[network_view] = dns_view + else: + logger.warning(f"Cannot find DNS View for Network View {network_view}.") + + return dns_view + + @lru_cache(maxsize=1024) + def get_default_dns_view_for_network_view(self, network_view: str): + """Get default (first on the list) DNS view for given network view. 
+ + Args: + network_view (str): Name of the network view - 'dev' + + Returns: + (str) name of the default DNS view + """ + _network_view = self.get_network_view(network_view) + if _network_view and "associated_dns_views" in _network_view[0]: + return _network_view[0]["associated_dns_views"][0] + # There is no easy way to recover if the network view is somehow missing associated dns views. + # This should only really happen if there's no network view for the provided name. + raise ValueError("Error retrieving the default DNS View for Network View {network_view}.") diff --git a/nautobot_ssot/integrations/infoblox/utils/diffsync.py b/nautobot_ssot/integrations/infoblox/utils/diffsync.py index df9829019..d96e4c7ba 100644 --- a/nautobot_ssot/integrations/infoblox/utils/diffsync.py +++ b/nautobot_ssot/integrations/infoblox/utils/diffsync.py @@ -1,9 +1,12 @@ """Utilities for DiffSync related stuff.""" +from typing import Optional + from django.contrib.contenttypes.models import ContentType from django.utils.text import slugify -from nautobot.ipam.models import IPAddress, Prefix, VLAN from nautobot.extras.models import CustomField, Tag +from nautobot.ipam.models import VLAN, IPAddress, Namespace, Prefix + from nautobot_ssot.integrations.infoblox.constant import TAG_COLOR @@ -17,7 +20,7 @@ def create_tag_sync_from_infoblox(): "color": TAG_COLOR, }, ) - for model in [IPAddress, Prefix, VLAN]: + for model in [IPAddress, Namespace, Prefix, VLAN]: tag.content_types.add(ContentType.objects.get_for_model(model)) return tag @@ -47,7 +50,7 @@ def nautobot_vlan_status(status: str) -> str: return statuses[status] -def get_ext_attr_dict(extattrs: dict): +def get_ext_attr_dict(extattrs: dict, excluded_attrs: Optional[list] = None): """Rebuild Extensibility Attributes dict into standard k/v pattern. 
def get_valid_custom_fields(cfs: dict, excluded_cfs: Optional[list] = None):
    """Remove custom fields that are on the excluded list.

    Args:
        cfs (dict): custom field name -> value mapping.
        excluded_cfs (list): additional custom field names to exclude.

    Returns:
        dict: ``cfs`` without the excluded keys.
    """
    # These cfs are always excluded.
    default_excluded_cfs = {
        "dhcp_ranges",
        "dns_a_record_comment",
        "dns_host_record_comment",
        "dns_ptr_record_comment",
        "fixed_address_comment",
        "mac_address",
        "ssot_synced_to_infoblox",
    }
    # Fix: previously this called excluded_cfs.extend(...), mutating the
    # caller-supplied list on every call. Build a local set instead.
    excluded = default_excluded_cfs.union(excluded_cfs or [])
    return {cf_name: val for cf_name, val in cfs.items() if cf_name not in excluded}
""" + if excluded_cfs is None: + excluded_cfs = [] customfields = CustomField.objects.filter(content_types=cf_contenttype) + # These cfs are always excluded + default_excluded_cfs = [ + "dhcp_ranges", + "dns_a_record_comment", + "dns_host_record_comment", + "dns_ptr_record_comment", + "fixed_address_comment", + "mac_address", + "ssot_synced_to_infoblox", + ] + # User defined excluded cfs + excluded_cfs.extend(default_excluded_cfs) default_cfs = {} for customfield in customfields: - if customfield.key != "ssot_synced_to_infoblox": - if customfield.key not in default_cfs: - default_cfs[customfield.key] = None + if customfield.key in excluded_cfs: + continue + if customfield.key not in default_cfs: + default_cfs[customfield.key] = None return default_cfs + + +def map_network_view_to_namespace(value: str, direction: str) -> str: + """Remaps Infoblox Network View name to Nautobot Namespace name. + + This matters most for mapping default "default" Network View to default Namespace "Global". + + Args: + network_view (str): Infoblox Network View name + + Returns: + (str) corresponding Nautobot Namespace name + """ + network_view_to_namespace = { + "default": "Global", + } + namespace_to_network_view = {ns: nv for nv, ns in network_view_to_namespace.items()} + + if direction == "nv_to_ns": + return network_view_to_namespace.get(value, value) + if direction == "ns_to_nv": + return namespace_to_network_view.get(value, value) + + return None + + +def validate_dns_name(infoblox_client: object, dns_name: str, network_view: str) -> bool: + """Checks if DNS name matches any of the zones found in Infoblox. 
+ + Args: + (object) infoblox_conn: Infoblox API client + (str) dns_name: DNS name + (str) network_view: network view name + + Returns: + (bool) + """ + dns_view = infoblox_client.get_dns_view_for_network_view(network_view=network_view) + zones = infoblox_client.get_authoritative_zones_for_dns_view(view=dns_view) + dns_name_valid = False + for zone in zones: + if zone["fqdn"] in dns_name: + dns_name_valid = True + break + + return dns_name_valid diff --git a/nautobot_ssot/integrations/infoblox/views.py b/nautobot_ssot/integrations/infoblox/views.py new file mode 100644 index 000000000..9ef9206e7 --- /dev/null +++ b/nautobot_ssot/integrations/infoblox/views.py @@ -0,0 +1,58 @@ +"""Views implementation for SSOT Infoblox.""" + +from nautobot.extras.views import ObjectChangeLogView, ObjectNotesView +from nautobot.apps.views import ( + ObjectDestroyViewMixin, + ObjectDetailViewMixin, + ObjectEditViewMixin, + ObjectListViewMixin, +) + +from .api.serializers import SSOTInfobloxConfigSerializer +from .filters import SSOTInfobloxConfigFilterSet +from .forms import SSOTInfobloxConfigFilterForm, SSOTInfobloxConfigForm +from .models import SSOTInfobloxConfig +from .tables import SSOTInfobloxConfigTable + + +class SSOTInfobloxConfigUIViewSet( + ObjectDestroyViewMixin, ObjectDetailViewMixin, ObjectListViewMixin, ObjectEditViewMixin +): # pylint: disable=abstract-method + """SSOTInfobloxConfig UI ViewSet.""" + + queryset = SSOTInfobloxConfig.objects.all() + table_class = SSOTInfobloxConfigTable + filterset_class = SSOTInfobloxConfigFilterSet + filterset_form_class = SSOTInfobloxConfigFilterForm + form_class = SSOTInfobloxConfigForm + serializer_class = SSOTInfobloxConfigSerializer + lookup_field = "pk" + action_buttons = ("add",) + + def get_template_name(self): + """Override inherited method to allow custom location for templates.""" + action = self.action + app_label = "nautobot_ssot_infoblox" + model_opts = self.queryset.model._meta + if action in ["create", "update"]: + 
template_name = f"{app_label}/{model_opts.model_name}_update.html" + elif action == "retrieve": + template_name = f"{app_label}/{model_opts.model_name}_retrieve.html" + elif action == "list": + template_name = f"{app_label}/{model_opts.model_name}_list.html" + else: + template_name = super().get_template_name() + + return template_name + + +class SSOTInfobloxConfigChangeLogView(ObjectChangeLogView): + """SSOTInfobloxConfig ChangeLog View.""" + + base_template = "nautobot_ssot_infoblox/ssotinfobloxconfig_retrieve.html" + + +class SSOTInfobloxConfigNotesView(ObjectNotesView): + """SSOTInfobloxConfig Notes View.""" + + base_template = "nautobot_ssot_infoblox/ssotinfobloxconfig_retrieve.html" diff --git a/nautobot_ssot/integrations/itential/__init__.py b/nautobot_ssot/integrations/itential/__init__.py new file mode 100644 index 000000000..fcdfba3e8 --- /dev/null +++ b/nautobot_ssot/integrations/itential/__init__.py @@ -0,0 +1 @@ +"""Itential SSoT.""" diff --git a/nautobot_ssot/integrations/itential/api/__init__.py b/nautobot_ssot/integrations/itential/api/__init__.py new file mode 100644 index 000000000..0e72a39c0 --- /dev/null +++ b/nautobot_ssot/integrations/itential/api/__init__.py @@ -0,0 +1 @@ +"""Itential SSoT Api.""" diff --git a/nautobot_ssot/integrations/itential/api/serializers.py b/nautobot_ssot/integrations/itential/api/serializers.py new file mode 100644 index 000000000..c06fbaacc --- /dev/null +++ b/nautobot_ssot/integrations/itential/api/serializers.py @@ -0,0 +1,19 @@ +"""Itential SSoT serializers.""" + +from rest_framework import serializers + +from nautobot.apps.api import NautobotModelSerializer + +from nautobot_ssot.integrations.itential import models + + +class AutomationGatewayModelSerializer(NautobotModelSerializer): # pylint: disable=too-many-ancestors + """AutomationGatewayModel serializer.""" + + url = serializers.HyperlinkedIdentityField(view_name="plugins-api:nautobot_ssot-api:automationgatewaymodel-detail") + + class Meta: + """Meta class 
definition.""" + + model = models.AutomationGatewayModel + fields = "__all__" diff --git a/nautobot_ssot/integrations/itential/api/urls.py b/nautobot_ssot/integrations/itential/api/urls.py new file mode 100644 index 000000000..7859c6d5e --- /dev/null +++ b/nautobot_ssot/integrations/itential/api/urls.py @@ -0,0 +1,10 @@ +"""Itential SSoT API URL's.""" + +from nautobot.apps.api import OrderedDefaultRouter +from nautobot_ssot.integrations.itential.api import views + + +router = OrderedDefaultRouter() +router.register("itential/automation-gateway", views.AutomationGatewayModelViewSet) + +urlpatterns = router.urls diff --git a/nautobot_ssot/integrations/itential/api/views.py b/nautobot_ssot/integrations/itential/api/views.py new file mode 100644 index 000000000..4575158e8 --- /dev/null +++ b/nautobot_ssot/integrations/itential/api/views.py @@ -0,0 +1,14 @@ +"""Itential SSoT API Views.""" + +from nautobot.apps.api import NautobotModelViewSet + +from nautobot_ssot.integrations.itential import models, filters +from nautobot_ssot.integrations.itential.api import serializers + + +class AutomationGatewayModelViewSet(NautobotModelViewSet): # pylint: disable=too-many-ancestors + """AutomationGatewayModel API ViewSet.""" + + queryset = models.AutomationGatewayModel.objects.all() + serializer_class = serializers.AutomationGatewayModelSerializer + filterset_class = filters.AutomationGatewayModelFilterSet diff --git a/nautobot_ssot/integrations/itential/clients.py b/nautobot_ssot/integrations/itential/clients.py new file mode 100644 index 000000000..2c691e75f --- /dev/null +++ b/nautobot_ssot/integrations/itential/clients.py @@ -0,0 +1,307 @@ +"""Itential SSoT API Clients.""" + +from typing import List, Optional, Union + +import requests + +from retry import retry + +from nautobot_ssot.integrations.itential.constants import BACKOFF, DELAY, RETRIES + + +class AutomationGatewayClient: # pylint: disable=too-many-instance-attributes + """Itential Automation Gateway API Client.""" + + 
def __init__( + self, + host: str, + username: str, + password: str, + job: object, + verify_ssl: Optional[bool] = True, + api_version: Optional[str] = "v2.0", + ): # pylint: disable=too-many-arguments + """Initialize the API client. + + Args: + host (str): Hostname or IP address of automation gateway. + username (str): Username. + password (str): Password. + job (object): Job object. + verify_ssl (Optional[bool], optional): Enable or disable verification of SSL. Defaults to True. + api_version (Optional[str], optional): Automation Gateway API version. + """ + self.host = host + self.username = username + self.password = password + self.job = job + self.verify_ssl = verify_ssl + self.api_version = api_version + self.session = requests.Session() + self.cookie = {} + + def __enter__(self): + """Context manager setup.""" + self.login() + + def __exit__(self, exc_type, exc_value, traceback): + """Context manager teardown.""" + self.logout() + + @property + def base_url(self): + """Build base URL.""" + return f"{self.host}/api/{self.api_version}" + + @retry(requests.exceptions.HTTPError, delay=DELAY, tries=RETRIES, backoff=BACKOFF) + def _get(self, uri: str) -> requests.Response: + """Perform a GET request to the specified uri.""" + response = self.session.get(f"{self.base_url}/{uri}", verify=self.verify_ssl) + return response + + @retry(requests.exceptions.HTTPError, delay=DELAY, tries=RETRIES, backoff=BACKOFF) + def _post(self, uri: str, json_data: Optional[dict] = None) -> requests.Response: + """Perform a POST request to the specified uri.""" + if json_data: + response = self.session.post(f"{self.base_url}/{uri}", json=json_data, verify=self.verify_ssl) + else: + response = self.session.post(f"{self.base_url}/{uri}", verify=self.verify_ssl) + return response + + @retry(requests.exceptions.HTTPError, delay=DELAY, tries=RETRIES, backoff=BACKOFF) + def _put(self, uri: str, json_data: Optional[dict] = None) -> requests.Response: + """Perform a PUT request to the 
specified uri.""" + if json_data: + response = self.session.put(f"{self.base_url}/{uri}", json=json_data, verify=self.verify_ssl) + else: + response = self.session.put(f"{self.base_url}/{uri}", verify=self.verify_ssl) + return response + + @retry(requests.exceptions.HTTPError, delay=DELAY, tries=RETRIES, backoff=BACKOFF) + def _delete(self, uri: str) -> requests.Response: + """Perform a GET request to the specified uri.""" + response = self.session.delete(f"{self.base_url}/{uri}", verify=self.verify_ssl) + return response + + def login(self) -> Union[requests.Response, requests.HTTPError]: + """Login to Automation Gateway.""" + response = self._post(uri="login", json_data={"username": self.username, "password": self.password}) + + if response.ok: + self.job.logger.info(f"Logging into {self.host}.") + self.cookie = {"AutomationGatewayToken": response.json()["token"]} + self.session.headers.update(self.cookie) + return response.json() + self.job.logger.warning(f"Failed to login to {self.host}.") + return response.raise_for_status() + + def logout(self) -> Union[requests.Response, requests.HTTPError]: + """Logout of Automation Gateway.""" + response = self._post(uri="logout") + if response.ok: + self.job.logger.info(f"Logging out of {self.host}.") + return response.json() + self.job.logger.warning(f"Failed logging out of {self.host}.") + return response.raise_for_status() + + def status(self) -> Union[requests.Response, requests.HTTPError]: + """Get Automation Gateway status.""" + response = self._get(uri="poll") + if response.ok: + self.job.logger.info(f"{self.host} polling is successful.") + return response.json() + self.job.logger.warning(f"Failed to poll {self.host}.") + return response.raise_for_status() + + def get_devices(self) -> Union[requests.Response, requests.HTTPError]: + """Get a devices.""" + response = self._get(uri="devices") + if response.ok: + self.job.logger.info(f"Pulling devices from {self.host}.") + return response.json() + 
self.job.logger.warning(f"Failed pulling devices from {self.host}.") + return response.raise_for_status() + + def get_device(self, device_name: str) -> Union[requests.Response, requests.HTTPError]: + """Get a device object. + + Args: + device_name (str): Device name. + + Returns: + dict: The device and its attributes. + """ + response = self._get(uri=f"devices/{device_name}") + if response.ok: + self.job.logger.info(f"Pulling {device_name} from {self.host}.") + return response.json() + self.job.logger.warning(f"Failed pulling {device_name} from {self.host}.") + return response.raise_for_status() + + def create_device( + self, device_name: str, variables: Optional[dict] + ) -> Union[requests.Response, requests.HTTPError]: + """Create a device with attributes. + + Args: + device_name (str): Device name. + variables (dict, optional): Device attributes. Defaults to {}. + + Returns: + dict: API client return message. + """ + payload = {"name": device_name, "variables": variables} + response = self._post(uri="devices", json_data=payload) + if response.ok: + self.job.logger.info(f"Creating {device_name} on {self.host}.") + return response.json() + self.job.logger.warning(f"Failed to create {device_name} on {self.host}.") + return response.raise_for_status() + + def update_device( + self, device_name: str, variables: Optional[dict] + ) -> Union[requests.Response, requests.HTTPError]: + """Update a device with attributes. + + Args: + device_name (str): Device name. + variables (dict, optional): Device attributes. Defaults to {}. + + Returns: + dict: API client return message. 
+ """ + response = self._put(uri=f"devices/{device_name}", json_data=variables) + if response.ok: + self.job.logger.info(f"Updating {device_name} on {self.host}.") + return response.json() + self.job.logger.warning(f"Failed to update {device_name} on {self.host}.") + return response.raise_for_status() + + def delete_device(self, device_name: str) -> Union[requests.Response, requests.HTTPError]: + """Delete a device. + + Args: + device_name (str): Device name. + + Returns: + dict: API client return message. + """ + response = self._delete(uri=f"devices/{device_name}") + if response.ok: + self.job.logger.info(f"Deleting {device_name} on {self.host}.") + return response.json() + self.job.logger.warning(f"Failed to delete {device_name} on {self.host}.") + return response.raise_for_status() + + def get_groups(self) -> List[str]: + """Get a groups.""" + response = self._get(uri="groups") + if response.ok: + self.job.logger.info(f"Pulling groups from {self.host}.") + return response.json() + self.job.logger.warning(f"Failed pulling groups from {self.host}.") + return response.raise_for_status() + + def get_group(self, group_name: str) -> Union[requests.Response, requests.HTTPError]: + """Get a group object. + + Args: + group_name (str): group name. + + Returns: + dict: The group and its attributes. + """ + response = self._get(uri=f"groups/{group_name}") + if response.ok: + self.job.logger.info(f"Pulling {group_name} from {self.host}.") + return response.json() + self.job.logger.warning(f"Failed pulling {group_name} from {self.host}.") + return response.raise_for_status() + + def create_group(self, group_name: str, variables: Optional[dict]) -> Union[requests.Response, requests.HTTPError]: + """Create a group with attributes. + + Args: + group_name (str): group name. + variables (dict, optional): group attributes. Defaults to {}. + + Returns: + dict: API client return message. 
+ """ + payload = {"name": group_name, "variables": variables} + response = self._post(uri="groups", json_data=payload) + if response.ok: + self.job.logger.info(f"Creating {group_name} on {self.host}.") + return response.json() + self.job.logger.warning(f"Failed to create {group_name} on {self.host}.") + return response.raise_for_status() + + def update_group(self, group_name: str, variables: Optional[dict]) -> Union[requests.Response, requests.HTTPError]: + """Update a group with attributes. + + Args: + group_name (str): group name. + variables (dict, optional): group attributes. Defaults to {}. + + Returns: + dict: API client return message. + """ + response = self._put(uri=f"groups/{group_name}", json_data=variables) + if response.ok: + self.job.logger.info(f"Updating {group_name} on {self.host}.") + return response.json() + self.job.logger.warning(f"Failed to update {group_name} on {self.host}.") + return response.raise_for_status() + + def delete_group(self, group_name: str) -> Union[requests.Response, requests.HTTPError]: + """Delete a group. + + Args: + group_name (str): group name. + + Returns: + dict: API client return message. + """ + response = self._delete(uri=f"groups/{group_name}") + if response.ok: + self.job.logger.info(f"Deleting {group_name} on {self.host}.") + return response.json() + self.job.logger.warning(f"Failed to delete {group_name} on {self.host}.") + return response.raise_for_status() + + def add_device_to_group(self, group_name: str, device_name: str) -> Union[requests.Response, requests.HTTPError]: + """Add a device to a group. + + Args: + group_name (str): Group name. + device_name (str): Device name. + + Returns: + Union[requests.Response, requests.HTTPError]: API client return message. 
+ """ + device_name = [device_name] + response = self._post(uri=f"groups/{group_name}/devices", json_data=device_name) + if response.ok: + self.job.logger.info(f"Adding {device_name} to {group_name} group on {self.host}.") + return response.json() + self.job.logger.warning(f"Failed to add {device_name} to {group_name} group on {self.host}.") + return response.raise_for_status() + + def delete_device_from_group( + self, group_name: str, device_name: str + ) -> Union[requests.Response, requests.HTTPError]: + """Delete a device from a group. + + Args: + group_name (str): Group name. + device_name (str): Device name. + + Returns: + Union[requests.Response, requests.HTTPError]: API client return message. + """ + response = self._delete(uri=f"groups/{group_name}/devices/{device_name}") + if response.ok: + self.job.logger.info(f"Deleting {device_name} from {group_name} group on {self.host}.") + return response.json() + self.job.logger.warning(f"Failed to delete {device_name} from {group_name} group on {self.host}.") # nosec + return response.raise_for_status() diff --git a/nautobot_ssot/integrations/itential/constants.py b/nautobot_ssot/integrations/itential/constants.py new file mode 100644 index 000000000..03da396f6 --- /dev/null +++ b/nautobot_ssot/integrations/itential/constants.py @@ -0,0 +1,5 @@ +"""Itential SSoT constants.""" + +DELAY = 1 +RETRIES = 2 +BACKOFF = 2 diff --git a/nautobot_ssot/integrations/itential/diffsync/__init__.py b/nautobot_ssot/integrations/itential/diffsync/__init__.py new file mode 100644 index 000000000..1c67bb64d --- /dev/null +++ b/nautobot_ssot/integrations/itential/diffsync/__init__.py @@ -0,0 +1 @@ +"""Itential SSoT diffsync models and adapters.""" diff --git a/nautobot_ssot/integrations/itential/diffsync/adapters/__init__.py b/nautobot_ssot/integrations/itential/diffsync/adapters/__init__.py new file mode 100644 index 000000000..d983f25fb --- /dev/null +++ b/nautobot_ssot/integrations/itential/diffsync/adapters/__init__.py @@ -0,0 +1 
@@ +"""Itential SSoT diffsync adapters.""" diff --git a/nautobot_ssot/integrations/itential/diffsync/adapters/itential.py b/nautobot_ssot/integrations/itential/diffsync/adapters/itential.py new file mode 100644 index 000000000..494bb2af5 --- /dev/null +++ b/nautobot_ssot/integrations/itential/diffsync/adapters/itential.py @@ -0,0 +1,43 @@ +"""Itential SSoT adapters.""" + +from diffsync import DiffSync + +from nautobot_ssot.integrations.itential.diffsync.models.itential import ( + ItentialAnsibleDeviceModel, + ItentialDefaultAnsibleGroupModel, +) +from nautobot_ssot.integrations.itential.clients import AutomationGatewayClient + + +class ItentialAnsibleDeviceAdapter(DiffSync): + """Itential Ansible Device Diffsync adapter.""" + + device = ItentialAnsibleDeviceModel + all_group = ItentialDefaultAnsibleGroupModel + top_level = ["all_group", "device"] + + def __init__(self, api_client: AutomationGatewayClient, job: object, sync: object, *args, **kwargs): + """Initialize Diffsync Adapter.""" + super().__init__(*args, **kwargs) + self.api_client = api_client + self.job = job + self.sync = sync + + def load(self): + """Load Adapter.""" + self.job.logger.info(f"Loading default ansible group variables from {self.api_client.host}.") + groups = self.api_client.get_groups().get("data") + + for iag_group in groups: + if iag_group.get("name") == "all": + _group = self.all_group(name=iag_group.get("name"), variables=iag_group.get("variables")) + + self.add(_group) + + self.job.logger.info(f"Loading Itential devices from {self.api_client.host} into Diffsync adapter.") + devices = self.api_client.get_devices().get("data") + + for iag_device in devices: + _device = self.device(name=iag_device.get("name"), variables=iag_device.get("variables")) + + self.add(_device) diff --git a/nautobot_ssot/integrations/itential/diffsync/adapters/nautobot.py b/nautobot_ssot/integrations/itential/diffsync/adapters/nautobot.py new file mode 100644 index 000000000..39e7fd794 --- /dev/null +++ 
"""Itential SSoT Nautobot adapters."""

import re
import traceback

from diffsync import DiffSync

from nautobot.extras.models import Status
from nautobot.dcim.models import Device

from nautobot.extras.choices import SecretsGroupAccessTypeChoices, SecretsGroupSecretTypeChoices

from nautobot_ssot.integrations.itential.models import AutomationGatewayModel
from nautobot_ssot.integrations.itential.diffsync.models.nautobot import (
    NautobotAnsibleDeviceModel,
    NautobotDefaultAnsibleGroupModel,
)


class NautobotAnsibleDeviceAdapter(DiffSync):
    """Nautobot => Itential Ansible Device Diffsync Adapter."""

    device = NautobotAnsibleDeviceModel
    all_group = NautobotDefaultAnsibleGroupModel
    top_level = ["all_group", "device"]

    def __init__(  # pylint: disable=too-many-arguments
        self, job: object, sync: object, gateway: AutomationGatewayModel, status: Status, *args, **kwargs
    ):
        """Initialize Nautobot Itential Ansible Device Diffsync adapter.

        Args:
            job (object): Job object.
            sync (object): Sync object.
            gateway (AutomationGatewayModel): Automation Gateway the devices sync to.
            status (Status): Device status used to filter the devices to load.
        """
        # Fix: the pylint pragma was "# pylint disable=..." (missing colon) and
        # therefore ignored by pylint.
        super().__init__(*args, **kwargs)
        self.job = job
        self.sync = sync
        self.gateway = gateway
        self.status = status

    def _is_rfc1123_compliant(self, device_name: str) -> bool:
        """Check to see if a device name is RFC 1123 compliant."""
        # Check for invalid characters (anything other than alphanumerics, hyphens, and periods)
        if not re.search("[a-zA-Z0-9][a-zA-Z0-9-.]{0,62}$", device_name):
            # Fix: typo "iinvalid" -> "invalid".
            self.job.logger.warning(f"{device_name} has invalid characters.")
            return False

        # RFC 1123 allows hostnames to start with a digit
        label_pattern = r"[a-zA-Z0-9][a-zA-Z0-9-]{0,62}$"

        # Split device_name into labels and check each one
        for label in device_name.split("."):
            if not re.match(label_pattern, label) or label.endswith("-"):
                self.job.logger.warning(f"{device_name} has an invalid hostname pattern.")
                return False

        return True

    def _ansible_vars(self, device_obj: Device) -> dict:
        """Create device variables to load into Automation Gateway."""
        # Add ansible_network_os if available
        if device_obj.platform and device_obj.platform.network_driver_mappings.get("ansible"):
            ansible_network_os = {"ansible_network_os": device_obj.platform.network_driver_mappings.get("ansible")}
        else:
            ansible_network_os = {}

        # Add device specific credentials if available; AttributeError means the
        # device has no secrets_group, in which case group-level credentials apply.
        try:
            ansible_username = {
                "ansible_username": device_obj.secrets_group.get_secret_value(
                    access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC,
                    secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME,
                )
            }
            ansible_password = {
                "ansible_password": device_obj.secrets_group.get_secret_value(
                    access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC,
                    secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD,
                )
            }
        except AttributeError:
            ansible_username = {}
            ansible_password = {}

        # Add ansible_host (devices without a primary IPv4 are excluded in load())
        ansible_host = {"ansible_host": device_obj.primary_ip4.host}

        # Add device attributes from config_context
        config_context = device_obj.get_config_context()

        return {**ansible_host, **ansible_network_os, **ansible_username, **ansible_password, **config_context}

    @property
    def _default_group_vars(self) -> dict:
        """Create the ansible default group variables to load into Automation Gateway."""
        try:
            ansible_username = {
                "ansible_username": self.gateway.gateway.secrets_group.get_secret_value(
                    access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC,
                    secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME,
                )
            }
            ansible_password = {
                "ansible_password": self.gateway.gateway.secrets_group.get_secret_value(
                    access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC,
                    secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD,
                )
            }
        except AttributeError:
            ansible_username = {}
            ansible_password = {}

        return {**ansible_username, **ansible_password}

    def load(self):
        """Load Nautobot Diffsync adapter."""
        self.job.logger.info("Loading default ansible group variables from Nautobot.")
        _group = self.all_group(name="all", variables=self._default_group_vars)
        self.add(_group)

        self.job.logger.info("Loading locations from Nautobot.")
        location = self.gateway.location
        # Fix: when location_descendants is False, location__in needs an iterable,
        # not a single Location instance.
        locations = location.descendants(include_self=True) if self.gateway.location_descendants else [location]

        self.job.logger.info("Loading devices from Nautobot.")
        devices = Device.objects.filter(location__in=locations, status=self.status.pk).exclude(primary_ip4=None)

        for nb_device in devices:
            # Fix: non-compliant names previously raised a broad Exception that was
            # caught immediately below — exceptions as control flow. Skip with a
            # warning instead; behavior (device not loaded, warning logged) is kept.
            if not self._is_rfc1123_compliant(nb_device.name):
                self.job.logger.warning(f"{nb_device.name} is not RFC 1123 compliant.")
                continue
            try:
                device_vars = self._ansible_vars(nb_device)
                _device = self.device(name=nb_device.name, variables=device_vars)
                self.add(_device)
            except Exception as exc:  # pylint: disable=broad-exception-caught
                stacktrace = traceback.format_exc()
                self.job.logger.warning(f"{nb_device.name} was not added to inventory due to an error.")
                # Fix: typo "ocurred" -> "occurred"; type(exec) referenced the
                # builtin exec instead of the caught exception exc.
                self.job.logger.warning(
                    f"An exception occurred: `{type(exc).__name__}: {exc}`\n```\n{stacktrace}\n```"
                )
_modelname = "device" + _identifiers = ("name",) + _attributes = ("variables",) + + name: str + variables: Optional[dict] + + +class BaseAnsibleDefaultGroupDiffsyncModel(DiffSyncModel): + """Itential Default Ansible Group DiffsyncModel.""" + + _modelname = "all_group" + _identifiers = ("name",) + _attributes = ("variables",) + + name: str + variables: Optional[dict] diff --git a/nautobot_ssot/integrations/itential/diffsync/models/itential.py b/nautobot_ssot/integrations/itential/diffsync/models/itential.py new file mode 100644 index 000000000..4eb82393b --- /dev/null +++ b/nautobot_ssot/integrations/itential/diffsync/models/itential.py @@ -0,0 +1,44 @@ +"""Itential SSoT models.""" + +from nautobot_ssot.integrations.itential.diffsync.models import base + + +class ItentialAnsibleDeviceModel(base.BaseAnsibleDeviceDiffsyncModel): + """Itential Ansible Device DiffSyncModel.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create device in Automation Gateway.""" + diffsync.api_client.create_device(device_name=ids.get("name"), variables=attrs.get("variables")) + diffsync.api_client.add_device_to_group(group_name="all", device_name=ids.get("name")) + return super().create(ids=ids, diffsync=diffsync, attrs=attrs) + + def delete(self): + """Delete device in Automation Gateway.""" + self.diffsync.api_client.delete_device_from_group(group_name="all", device_name=self.name) + self.diffsync.api_client.delete_device(device_name=self.name) + return super().delete() + + def update(self, attrs): + """Update device in Automation Gateway.""" + self.diffsync.api_client.update_device(device_name=self.name, variables=attrs.get("variables")) + return super().update(attrs) + + +class ItentialDefaultAnsibleGroupModel(base.BaseAnsibleDefaultGroupDiffsyncModel): + """Itential Default Ansible Group DiffsyncModel.""" + + @classmethod + def create(cls, diffsync, ids, attrs): + """Create default group in Automation Gateway.""" + 
class ItentialDefaultAnsibleGroupModel(base.BaseAnsibleDefaultGroupDiffsyncModel):
    """Itential Default Ansible Group DiffsyncModel.

    CRUD handlers that push the default Ansible group to the Automation
    Gateway through ``diffsync.api_client``.
    """

    @classmethod
    def create(cls, diffsync, ids, attrs):
        """Create default group in Automation Gateway."""
        diffsync.api_client.create_group(group_name=ids.get("name"), variables=attrs.get("variables"))
        return super().create(ids=ids, diffsync=diffsync, attrs=attrs)

    def update(self, attrs):
        """Update default group in Automation Gateway.

        Bug fix: this previously called ``update_device(device_name=self.name)``,
        which would target a *device* named after the group instead of the group
        itself; groups are created via ``create_group`` and must be updated with
        the matching group call.
        NOTE(review): assumes AutomationGatewayClient exposes ``update_group``
        alongside ``create_group`` — confirm against the client implementation.
        """
        self.diffsync.api_client.update_group(group_name=self.name, variables=attrs.get("variables"))
        return super().update(attrs)

    def delete(self):
        """Deleting the default group is intentionally unsupported."""
        raise NotImplementedError
class AutomationGatewayModelBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for AutomationGatewayModel records."""

    # Hidden multi-select of the records being bulk edited.
    pk = forms.ModelMultipleChoiceField(
        queryset=models.AutomationGatewayModel.objects.all(), widget=forms.MultipleHiddenInput
    )
    enabled = forms.BooleanField(required=False)

    class Meta:
        """Meta class definition."""

        nullable_fields = []


class AutomationGatewayModelFilterForm(BootstrapMixin, forms.Form):
    """AutomationGatewayModel FilterForm form.

    Bug fix: the filter fields were previously declared inside ``Meta``;
    Django ignores ``Meta`` on a plain ``forms.Form``, so the form rendered
    no fields at all. Fields belong on the class body (typo "Autotmation"
    in the docstring also fixed).
    """

    model = models.AutomationGatewayModel
    q = forms.CharField(required=False, label="Search")
    name = forms.CharField(required=False)
    enabled = forms.BooleanField(required=False)


class AutomationGatewayModelForm(NautobotModelForm):  # pylint: disable=too-many-ancestors
    """Create/edit form for AutomationGatewayModel."""

    class Meta:
        """Meta class definition."""

        model = models.AutomationGatewayModel
        fields = ["name", "description", "location", "location_descendants", "gateway", "enabled"]
class ItentialAutomationGatewayDataTarget(DataTarget):  # pylint: disable=too-many-instance-attributes
    """Job syncing Nautobot device inventory to an Itential Automation Gateway."""

    # Runtime-selected sync parameters.
    gateway = ObjectVar(model=AutomationGatewayModel, description="Choose a gateway to sync to.", required=True)
    status = ObjectVar(model=Status, description="Choose a device status to sync.", required=True)

    class Meta:
        """Job metadata displayed in the Nautobot UI."""

        name = "Nautobot ⟹ Itential Automation Gateway"
        data_target = "Itential Automation Gateway"
        # data_source_icon = static("nautobot_ssot_itential/itential.png")
        description = "Sync data from Nautobot into Itential Automation Gateway."
        has_sensitive_variables = False

    def load_source_adapter(self):
        """Instantiate and load the Nautobot (source) adapter."""
        self.source_adapter = NautobotAnsibleDeviceAdapter(
            job=self,
            sync=self.sync,
            gateway=self.gateway,
            status=self.status,
        )
        self.logger.info("Loading data from Nautobot.")
        self.source_adapter.load()

    def load_target_adapter(self):
        """Instantiate and load the Itential (target) adapter."""
        if not self.gateway.enabled:
            # NOTE(review): returning here leaves self.target_adapter unset, so the
            # framework will likely fail later with a less obvious error — confirm
            # this early return is the intended way to skip a disabled gateway.
            self.logger.warning(f"{self.gateway.gateway.remote_url} is not enabled to sync inventory.")
            return

        # Credentials come from the ExternalIntegration's secrets group (REST access type).
        api_client = AutomationGatewayClient(
            host=self.gateway.gateway.remote_url,
            username=self.gateway.gateway.secrets_group.get_secret_value(
                access_type=SecretsGroupAccessTypeChoices.TYPE_REST,
                secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME,
            ),
            password=self.gateway.gateway.secrets_group.get_secret_value(
                access_type=SecretsGroupAccessTypeChoices.TYPE_REST,
                secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD,
            ),
            job=self,
            verify_ssl=self.gateway.gateway.verify_ssl,
        )
        api_client.login()

        self.target_adapter = ItentialAnsibleDeviceAdapter(job=self, sync=self.sync, api_client=api_client)
        self.logger.info("Loading data from Itential.")
        self.target_adapter.load()

    def run(self, dryrun, memory_profiling, gateway, status, *args, **kwargs):  # pylint: disable=arguments-differ
        """Stash the job parameters on the instance, then delegate to the base sync run."""
        self.gateway = gateway
        self.status = status
        self.dryrun = dryrun
        self.memory_profiling = memory_profiling
        super().run(dryrun=self.dryrun, memory_profiling=self.memory_profiling, *args, **kwargs)
class AutomationGatewayModel(PrimaryModel):  # pylint: disable=too-many-ancestors
    """Automation Gateway model for Nautobot Itential app.

    Ties an ExternalIntegration (the gateway endpoint plus its credentials)
    to the Location whose devices the gateway manages.
    """

    # Unique human-readable name for this gateway record.
    name = models.CharField(max_length=255, unique=True)
    # Optional free-form description.
    description = models.CharField(max_length=512, blank=True)
    location = models.ForeignKey(
        Location,
        on_delete=models.CASCADE,
        verbose_name="Location",
        help_text="Automation Gateway manages devices from this location.",
    )
    location_descendants = models.BooleanField(
        default=True,
        verbose_name="Include Location Descendants",
        help_text="Include descendant locations.",
    )
    # One gateway record per ExternalIntegration.
    gateway = models.OneToOneField(
        ExternalIntegration,
        on_delete=models.CASCADE,
        verbose_name="Automation Gateway",
        help_text="Automation Gateway server defined from external integration model.",
    )
    enabled = models.BooleanField(
        default=False,
        verbose_name="Automation Gateway enabled",
        help_text="Enable or Disable the Automation Gateway from being managed by Nautobot.",
    )

    class Meta:
        """Meta class."""

        ordering = ["name", "location"]
        verbose_name = "Automation Gateway Management"
        verbose_name_plural = "Automation Gateway Management"

    def __str__(self):
        """Return the gateway's name for display."""
        return self.name
class AutomationGatewayModelTable(BaseTable):
    """List-view table for AutomationGatewayModel records."""

    # Row-selection checkbox column.
    pk = ToggleColumn()
    # Link each row's name to the object's detail view.
    name = tables.LinkColumn()
    # Standard edit/delete action buttons.
    actions = ButtonsColumn(models.AutomationGatewayModel)

    class Meta:
        """Meta class definition."""

        model = models.AutomationGatewayModel
        fields = ["name", "description", "location", "location_descendants", "gateway", "enabled"]
class LocationDict(TypedDict):
    """Typed mapping used for many-to-many Location references on prefixes."""

    name: str
    location_type__name: str
class StatusModel(NautobotModel):
    """Shared data model representing a Status in either of the local or remote Nautobot instances."""

    # Metadata about this model
    _model = Status
    _modelname = "status"
    _identifiers = ("name",)
    _attributes = ("content_types", "color")

    # Data type declarations for all identifiers and attributes
    name: str
    color: str
    content_types: List[ContentTypeDict] = []


class NamespaceModel(NautobotModel):
    """Shared data model representing a Namespace in either of the local or remote Nautobot instance."""

    # Metadata about this model
    _model = Namespace
    _modelname = "namespace"
    _identifiers = ("name",)
    _attributes = ("description",)

    # Data type declarations for all identifiers and attributes
    name: str
    description: Optional[str] = ""
class IPAddressModel(NautobotModel):
    """Shared data model representing an IPAddress in either of the local or remote Nautobot instances."""

    # Metadata about this model.  An IP address is identified by its host,
    # mask length, and its parent prefix (network/length/namespace).
    _model = IPAddress
    _modelname = "ipaddress"
    _identifiers = ("host", "mask_length", "parent__network", "parent__prefix_length", "parent__namespace__name")
    _attributes = (
        "status__name",
        "ip_version",
        "tenant__name",
    )

    # Data type declarations for all identifiers and attributes
    host: str
    mask_length: int
    parent__network: str
    parent__prefix_length: int
    parent__namespace__name: str
    status__name: str
    ip_version: int
    tenant__name: Optional[str]
class InterfaceModel(NautobotModel):
    """Shared data model representing an Interface in either of the local or remote Nautobot instances."""

    # Metadata about this model.  An interface is identified by its own name
    # plus the name of the device it belongs to.
    _model = Interface
    _modelname = "interface"
    _identifiers = ("name", "device__name")
    _attributes = (
        "device__location__name",
        "device__location__parent__name",
        "description",
        "enabled",
        "mac_address",
        "mgmt_only",
        "mtu",
        "type",
        "status__name",
    )
    _children = {}

    # Data type declarations for all identifiers and attributes
    device__name: str
    device__location__name: str
    device__location__parent__name: str
    description: Optional[str]
    enabled: bool
    mac_address: Optional[str]
    mgmt_only: bool
    mtu: Optional[int]
    name: str
    type: str
    status__name: str
StatusModel device = DeviceModel + interface = InterfaceModel # Top-level class labels, i.e. those classes that are handled directly rather than as children of other models - top_level = ["tenant", "locationtype", "location", "manufacturer", "platform", "role", "device"] + top_level = [ + "tenant", + "status", + "locationtype", + "location", + "manufacturer", + "platform", + "role", + "device", + "namespace", + "prefix", + "ipaddress", + ] def __init__(self, *args, url=None, token=None, job=None, **kwargs): """Instantiate this class, but do not load data immediately from the remote system. @@ -385,24 +482,28 @@ def __init__(self, *args, url=None, token=None, job=None, **kwargs): def _get_api_data(self, url_path: str) -> Mapping: """Returns data from a url_path using pagination.""" - data = requests.get(f"{self.url}/{url_path}", headers=self.headers, params={"limit": 0}, timeout=60).json() + data = requests.get(f"{self.url}/{url_path}", headers=self.headers, params={"limit": 200}, timeout=60).json() result_data = data["results"] while data["next"]: - data = requests.get(data["next"], headers=self.headers, params={"limit": 0}, timeout=60).json() + data = requests.get(data["next"], headers=self.headers, params={"limit": 200}, timeout=60).json() result_data.extend(data["results"]) return result_data def load(self): """Load data from the remote Nautobot instance.""" + self.load_statuses() self.load_location_types() self.load_locations() self.load_roles() self.load_tenants() + self.load_namespaces() self.load_prefixes() + self.load_ipaddresses() self.load_manufacturers() self.load_device_types() self.load_platforms() self.load_devices() + self.load_interfaces() def load_location_types(self): """Load LocationType data from the remote Nautobot instance.""" @@ -448,6 +549,18 @@ def load_roles(self): ) self.add(role) + def load_statuses(self): + """Load Statuses data from the remote Nautobot instance.""" + for status_entry in 
self._get_api_data("api/extras/statuses/?depth=1"): + content_types = self.get_content_types(status_entry) + status = self.status( + name=status_entry["name"], + color=status_entry["color"], + content_types=content_types, + pk=status_entry["id"], + ) + self.add(status) + def load_tenants(self): """Load Tenants data from the remote Nautobot instance.""" for tenant_entry in self._get_api_data("api/tenancy/tenants/?depth=1"): @@ -457,13 +570,28 @@ def load_tenants(self): ) self.add(tenant) + def load_namespaces(self): + """Load Namespaces data from remote Nautobot instance.""" + for namespace_entry in self._get_api_data("api/ipam/namespaces/?depth=1"): + namespace = self.namespace( + name=namespace_entry["name"], + description=namespace_entry["description"], + pk=namespace_entry["id"], + ) + self.add(namespace) + def load_prefixes(self): """Load Prefixes data from the remote Nautobot instance.""" - for prefix_entry in self._get_api_data("api/ipam/prefixes/?depth=1"): + for prefix_entry in self._get_api_data("api/ipam/prefixes/?depth=2"): prefix = self.prefix( network=prefix_entry["network"], prefix_length=prefix_entry["prefix_length"], + namespace__name=prefix_entry["namespace"]["name"], description=prefix_entry["description"], + locations=[ + {"name": x["name"], "location_type__name": x["location_type"]["name"]} + for x in prefix_entry["locations"] + ], status__name=prefix_entry["status"]["name"] if prefix_entry["status"].get("name") else "Active", tenant__name=prefix_entry["tenant"]["name"] if prefix_entry["tenant"] else "", pk=prefix_entry["id"], @@ -471,6 +599,23 @@ def load_prefixes(self): self.add(prefix) self.job.logger.debug(f"Loaded {prefix} from remote Nautobot instance") + def load_ipaddresses(self): + """Load IPAddresses data from the remote Nautobot instance.""" + for ipaddr_entry in self._get_api_data("api/ipam/ip-addresses/?depth=2"): + ipaddr = self.ipaddress( + host=ipaddr_entry["host"], + mask_length=ipaddr_entry["mask_length"], + 
parent__network=ipaddr_entry["parent"]["network"], + parent__prefix_length=ipaddr_entry["parent"]["prefix_length"], + parent__namespace__name=ipaddr_entry["parent"]["namespace"]["name"], + status__name=ipaddr_entry["status"]["name"], + ip_version=ipaddr_entry["ip_version"], + tenant__name=ipaddr_entry["tenant"]["name"] if ipaddr_entry.get("tenant") else "", + pk=ipaddr_entry["id"], + ) + self.add(ipaddr) + self.job.logger.debug(f"Loaded {ipaddr} from remote Nautobot instance") + def load_manufacturers(self): """Load Manufacturers data from the remote Nautobot instance.""" for manufacturer in self._get_api_data("api/dcim/manufacturers/?depth=1"): @@ -480,6 +625,7 @@ def load_manufacturers(self): pk=manufacturer["id"], ) self.add(manufacturer) + self.job.logger.debug(f"Loaded {manufacturer} from remote Nautobot instance") def load_device_types(self): """Load DeviceTypes data from the remote Nautobot instance.""" @@ -495,6 +641,7 @@ def load_device_types(self): pk=device_type["id"], ) self.add(devicetype) + self.job.logger.debug(f"Loaded {devicetype} from remote Nautobot instance") manufacturer.add_child(devicetype) except ObjectNotFound: self.job.logger.debug(f"Unable to find Manufacturer {device_type['manufacturer']['name']}") @@ -511,6 +658,7 @@ def load_platforms(self): pk=platform["id"], ) self.add(platform) + self.job.logger.debug(f"Loaded {platform} from remote Nautobot instance") def load_devices(self): """Load Devices data from the remote Nautobot instance.""" @@ -536,6 +684,42 @@ def load_devices(self): pk=device["id"], ) self.add(device) + self.job.logger.debug(f"Loaded {device} from remote Nautobot instance") + + def load_interfaces(self): + """Load Interfaces data from the remote Nautobot instance.""" + self.job.logger.info("Pulling data from remote Nautobot instance for Interfaces.") + for interface in self._get_api_data("api/dcim/interfaces/?depth=3"): + try: + dev = self.get( + self.device, + { + "name": interface["device"]["name"], + "location__name": 
interface["device"]["location"]["name"], + "location__parent__name": interface["device"]["location"]["parent"]["name"], + }, + ) + new_interface = self.interface( + name=interface["name"], + device__name=interface["device"]["name"], + device__location__name=interface["device"]["location"]["name"], + device__location__parent__name=interface["device"]["location"]["parent"]["name"], + description=interface["description"], + enabled=interface["enabled"], + mac_address=interface["mac_address"], + mgmt_only=interface["mgmt_only"], + mtu=interface["mtu"], + type=interface["type"]["value"], + status__name=interface["status"]["name"], + pk=interface["id"], + ) + self.add(new_interface) + self.job.logger.debug( + f"Loaded {new_interface} for {interface['device']['name']} from remote Nautobot instance" + ) + dev.add_child(new_interface) + except ObjectNotFound: + self.job.logger.warning(f"Unable to find Device {interface['device']['name']} loaded.") def get_content_types(self, entry): """Create list of dicts of ContentTypes. @@ -582,15 +766,31 @@ class NautobotLocal(NautobotAdapter): locationtype = LocationTypeModel location = LocationModel tenant = TenantModel + namespace = NamespaceModel prefix = PrefixModel + ipaddress = IPAddressModel manufacturer = ManufacturerModel device_type = DeviceTypeModel platform = PlatformModel role = RoleModel + status = StatusModel device = DeviceModel + interface = InterfaceModel # Top-level class labels, i.e. 
those classes that are handled directly rather than as children of other models - top_level = ["tenant", "locationtype", "location", "manufacturer", "platform", "role", "device"] + top_level = [ + "tenant", + "status", + "locationtype", + "location", + "manufacturer", + "platform", + "role", + "device", + "namespace", + "prefix", + "ipaddress", + ] # The actual Data Source and Data Target Jobs are relatively simple to implement @@ -605,6 +805,7 @@ class ExampleDataSource(DataSource): queryset=ExternalIntegration.objects.all(), display_field="display", label="Nautobot Demo Instance", + required=False, ) source_url = StringVar( description="Remote Nautobot instance to load Sites and Regions from", default="https://demo.nautobot.com" @@ -630,10 +831,18 @@ class Meta: def data_mappings(cls): """This Job maps Region and Site objects from the remote system to the local system.""" return ( - DataMapping("Region (remote)", None, "Region (local)", reverse("dcim:location_list")), - DataMapping("Site (remote)", None, "Site (local)", reverse("dcim:location_list")), + DataMapping("LocationType (remote)", None, "LocationType (local)", reverse("dcim:locationtype_list")), + DataMapping("Location (remote)", None, "Location (local)", reverse("dcim:location_list")), + DataMapping("Role (remote)", None, "Role (local)", reverse("extras:role_list")), + DataMapping("Namespace (remote)", None, "Namespace (local)", reverse("ipam:namespace_list")), DataMapping("Prefix (remote)", None, "Prefix (local)", reverse("ipam:prefix_list")), + DataMapping("IPAddress (remote)", None, "IPAddress (local)", reverse("ipam:ipaddress_list")), DataMapping("Tenant (remote)", None, "Tenant (local)", reverse("tenancy:tenant_list")), + DataMapping("DeviceType (remote)", None, "DeviceType (local)", reverse("dcim:devicetype_list")), + DataMapping("Manufacturer (remote)", None, "Manufacturer (local)", reverse("dcim:manufacturer_list")), + DataMapping("Platform (remote)", None, "Platform (local)", 
reverse("dcim:platform_list")), + DataMapping("Device (remote)", None, "Device (local)", reverse("dcim:device_list")), + DataMapping("Interface (remote)", None, "Interface (local)", reverse("dcim:interface_list")), ) def run( # pylint: disable=too-many-arguments, arguments-differ @@ -677,25 +886,10 @@ def load_target_adapter(self): """Method to instantiate and load the TARGET adapter into `self.target_adapter`.""" self.target_adapter = NautobotLocal(job=self, sync=self.sync) self.target_adapter.load() - self.logger.info(f"Found {self.target_adapter.count('region')} regions") def lookup_object(self, model_name, unique_id): """Look up a Nautobot object based on the DiffSync model name and unique ID.""" - if model_name == "region": - try: - return Location.objects.get( - name=unique_id, location_type=LocationType.objects.get_or_create(name="Region")[0] - ) - except Location.DoesNotExist: - pass - elif model_name == "site": - try: - return Location.objects.get( - name=unique_id, location_type=LocationType.objects.get_or_create(name="Site")[0] - ) - except Location.DoesNotExist: - pass - elif model_name == "prefix": + if model_name == "prefix": try: return Prefix.objects.get( prefix=unique_id.split("__")[0], tenant__name=unique_id.split("__")[1] or None @@ -735,10 +929,18 @@ class Meta: def data_mappings(cls): """This Job maps Region and Site objects from the local system to the remote system.""" return ( - DataMapping("Region (local)", reverse("dcim:location_list"), "Region (remote)", None), - DataMapping("Site (local)", reverse("dcim:location_list"), "Site (remote)", None), + DataMapping("LocationType (local)", reverse("dcim:locationtype_list"), "LocationType (remote)", None), + DataMapping("Location (local)", reverse("dcim:location_list"), "Location (remote)", None), + DataMapping("Role (local)", reverse("extras:role_list"), "Role (remote)", None), + DataMapping("Namespace (local)", reverse("ipam:prefix_list"), "Namespace (remote)", None), DataMapping("Prefix 
(local)", reverse("ipam:prefix_list"), "Prefix (remote)", None), + DataMapping("IPAddress (local)", reverse("ipam:ipaddress_list"), "IPAddress (remote)", None), DataMapping("Tenant (local)", reverse("tenancy:tenant_list"), "Tenant (remote)", None), + DataMapping("DeviceType (local)", reverse("dcim:devicetype_list"), "DeviceType (remote)", None), + DataMapping("Manufacturer (local)", reverse("dcim:manufacturer_list"), "Manufacturer (remote)", None), + DataMapping("Platform (local)", reverse("dcim:platform_list"), "Platform (remote)", None), + DataMapping("Device (local)", reverse("dcim:device_list"), "Device (remote)", None), + DataMapping("Interface (local)", reverse("dcim:interface_list"), "Interface (remote)", None), ) def load_source_adapter(self): @@ -753,21 +955,7 @@ def load_target_adapter(self): def lookup_object(self, model_name, unique_id): """Look up a Nautobot object based on the DiffSync model name and unique ID.""" - if model_name == "region": - try: - return Location.objects.get( - name=unique_id, location_type=LocationType.objects.get_or_create(name="Region")[0] - ) - except Location.DoesNotExist: - pass - elif model_name == "site": - try: - return Location.objects.get( - name=unique_id, location_type=LocationType.objects.get_or_create(name="Site") - ) - except Location.DoesNotExist: - pass - elif model_name == "prefix": + if model_name == "prefix": try: return Prefix.objects.get( prefix=unique_id.split("__")[0], tenant__name=unique_id.split("__")[1] or None diff --git a/nautobot_ssot/migrations/0009_ssotconfig_ssotinfobloxconfig.py b/nautobot_ssot/migrations/0009_ssotconfig_ssotinfobloxconfig.py new file mode 100644 index 000000000..ae14c1560 --- /dev/null +++ b/nautobot_ssot/migrations/0009_ssotconfig_ssotinfobloxconfig.py @@ -0,0 +1,103 @@ +# Generated by Django 3.2.23 on 2024-06-17 14:33 + +import django.core.serializers.json +from django.db import migrations, models +import django.db.models.deletion +import nautobot.core.models.fields +import 
nautobot.extras.models.mixins +import nautobot_ssot.integrations.infoblox.models +import uuid + + +class Migration(migrations.Migration): + + dependencies = [ + ("extras", "0102_set_null_objectchange_contenttype"), + ("nautobot_ssot", "0008_auto_20240110_1019"), + ] + + operations = [ + migrations.CreateModel( + name="SSOTConfig", + fields=[ + ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False)), + ], + options={ + "managed": False, + "default_permissions": ("view",), + }, + ), + migrations.CreateModel( + name="SSOTInfobloxConfig", + fields=[ + ( + "id", + models.UUIDField( + default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True + ), + ), + ("created", models.DateTimeField(auto_now_add=True, null=True)), + ("last_updated", models.DateTimeField(auto_now=True, null=True)), + ( + "_custom_field_data", + models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder), + ), + ("name", models.CharField(max_length=255, unique=True)), + ("description", models.CharField(blank=True, max_length=255)), + ("infoblox_wapi_version", models.CharField(default="v2.12", max_length=255)), + ("enable_sync_to_infoblox", models.BooleanField(default=False)), + ("enable_sync_to_nautobot", models.BooleanField(default=True)), + ("import_ip_addresses", models.BooleanField(default=False)), + ("import_subnets", models.BooleanField(default=False)), + ("import_vlan_views", models.BooleanField(default=False)), + ("import_vlans", models.BooleanField(default=False)), + ( + "infoblox_sync_filters", + models.JSONField( + default=nautobot_ssot.integrations.infoblox.models._get_default_sync_filters, + encoder=django.core.serializers.json.DjangoJSONEncoder, + ), + ), + ( + "infoblox_dns_view_mapping", + models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder), + ), + ( + "cf_fields_ignore", + models.JSONField( + blank=True, + 
default=nautobot_ssot.integrations.infoblox.models._get_default_cf_fields_ignore, + encoder=django.core.serializers.json.DjangoJSONEncoder, + ), + ), + ("import_ipv4", models.BooleanField(default=True)), + ("import_ipv6", models.BooleanField(default=False)), + ("dns_record_type", models.CharField(default="create-host-record", max_length=255)), + ("fixed_address_type", models.CharField(default="do-not-create-record", max_length=255)), + ("job_enabled", models.BooleanField(default=False)), + ( + "infoblox_deletable_models", + models.JSONField(blank=True, default=list, encoder=django.core.serializers.json.DjangoJSONEncoder), + ), + ( + "nautobot_deletable_models", + models.JSONField(blank=True, default=list, encoder=django.core.serializers.json.DjangoJSONEncoder), + ), + ("default_status", models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to="extras.status")), + ( + "infoblox_instance", + models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to="extras.externalintegration"), + ), + ("tags", nautobot.core.models.fields.TagsField(through="extras.TaggedItem", to="extras.Tag")), + ], + options={ + "verbose_name": "SSOT Infoblox Config", + "verbose_name_plural": "SSOT Infoblox Configs", + }, + bases=( + models.Model, + nautobot.extras.models.mixins.DynamicGroupMixin, + nautobot.extras.models.mixins.NotesMixin, + ), + ), + ] diff --git a/nautobot_ssot/migrations/0010_automationgatewaymodel.py b/nautobot_ssot/migrations/0010_automationgatewaymodel.py new file mode 100644 index 000000000..d7f411d45 --- /dev/null +++ b/nautobot_ssot/migrations/0010_automationgatewaymodel.py @@ -0,0 +1,57 @@ +# Generated by Django 3.2.23 on 2024-06-26 19:01 + +import django.core.serializers.json +from django.db import migrations, models +import django.db.models.deletion +import nautobot.core.models.fields +import nautobot.extras.models.mixins +import uuid + + +class Migration(migrations.Migration): + + dependencies = [ + ("extras", 
"0102_set_null_objectchange_contenttype"), + ("dcim", "0052_fix_interface_redundancy_group_created"), + ("nautobot_ssot", "0009_ssotconfig_ssotinfobloxconfig"), + ] + + operations = [ + migrations.CreateModel( + name="AutomationGatewayModel", + fields=[ + ( + "id", + models.UUIDField( + default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True + ), + ), + ("created", models.DateTimeField(auto_now_add=True, null=True)), + ("last_updated", models.DateTimeField(auto_now=True, null=True)), + ( + "_custom_field_data", + models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder), + ), + ("name", models.CharField(max_length=255, unique=True)), + ("description", models.CharField(blank=True, max_length=512)), + ("location_descendants", models.BooleanField(default=True)), + ("enabled", models.BooleanField(default=False)), + ( + "gateway", + models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to="extras.externalintegration"), + ), + ("location", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="dcim.location")), + ("tags", nautobot.core.models.fields.TagsField(through="extras.TaggedItem", to="extras.Tag")), + ], + options={ + "verbose_name": "Automation Gateway Management", + "verbose_name_plural": "Automation Gateway Management", + "ordering": ["name", "location"], + }, + bases=( + models.Model, + nautobot.extras.models.mixins.DynamicGroupMixin, + nautobot.extras.models.mixins.NotesMixin, + ), + ), + ] diff --git a/nautobot_ssot/models.py b/nautobot_ssot/models.py index 711e32a71..e4c7810dd 100644 --- a/nautobot_ssot/models.py +++ b/nautobot_ssot/models.py @@ -28,12 +28,15 @@ from django.utils.formats import date_format from django.utils.timezone import now + from nautobot.core.models import BaseModel from nautobot.extras.choices import JobResultStatusChoices from nautobot.extras.models import JobResult from nautobot.extras.utils import extras_features from 
nautobot_ssot.integrations.servicenow.models import SSOTServiceNowConfig +from nautobot_ssot.integrations.infoblox.models import SSOTInfobloxConfig +from nautobot_ssot.integrations.itential.models import AutomationGatewayModel from .choices import SyncLogEntryActionChoices, SyncLogEntryStatusChoices @@ -206,7 +209,17 @@ def get_status_class(self): }.get(self.status) +class SSOTConfig(models.Model): # pylint: disable=nb-incorrect-base-class + """Non-db model providing user permission constraints.""" + + class Meta: + managed = False + default_permissions = ("view",) + + __all__ = ( + "SSOTInfobloxConfig", + "AutomationGatewayModel", "SSOTServiceNowConfig", "Sync", "SyncLogEntry", diff --git a/nautobot_ssot/navigation.py b/nautobot_ssot/navigation.py index 398e493f8..ac686903e 100644 --- a/nautobot_ssot/navigation.py +++ b/nautobot_ssot/navigation.py @@ -1,6 +1,7 @@ """App additions to the Nautobot navigation menu.""" from nautobot.apps.ui import NavMenuGroup, NavMenuItem, NavMenuTab +from .integrations.utils import each_enabled_integration_module items = [ @@ -21,6 +22,15 @@ ), ] + +def _add_integrations(): + for module in each_enabled_integration_module("navigation"): + items.extend(module.nav_items) + + +_add_integrations() + + menu_items = ( NavMenuTab( name="Plugins", diff --git a/nautobot_ssot/tables.py b/nautobot_ssot/tables.py index eb953259f..38cfde649 100644 --- a/nautobot_ssot/tables.py +++ b/nautobot_ssot/tables.py @@ -10,7 +10,7 @@ ACTION_LOGS_LINK = """ + href="{% url 'plugins:nautobot_ssot:synclogentry_list' %}?sync={{ record.id }}&action={{ action }}"> {{ value }} """ @@ -18,7 +18,7 @@ STATUS_LOGS_LINK = """ + href="{% url 'plugins:nautobot_ssot:synclogentry_list' %}?sync={{ record.id }}&status={{ status }}"> {{ value }} """ diff --git a/nautobot_ssot/templates/nautobot_ssot/ssot_configs.html b/nautobot_ssot/templates/nautobot_ssot/ssot_configs.html new file mode 100644 index 000000000..db6c990fd --- /dev/null +++ 
b/nautobot_ssot/templates/nautobot_ssot/ssot_configs.html @@ -0,0 +1,44 @@ +{% extends 'base.html' %} +{% load helpers %} + +{% block header %} +
+
+ +
+
+
+
+ +

{% block title %}SSOT Configs{% endblock %}

+{% endblock header %} + +{% block content %} +
+
+
+ SSOT Integration Configs +
+ + {% if perms.nautobot_ssot.view_ssotinfobloxconfig and "infoblox" in enabled_integrations %} + + + + + {% endif %} + {% if perms.nautobot_ssot.view_ssotservicenowconfig and "servicenow" in enabled_integrations %} + + + + + {% endif %} +
Infoblox + Infoblox Configuration List +
ServiceNow + ServiceNow Configuration Instance +
+
+
+{% endblock content %} \ No newline at end of file diff --git a/nautobot_ssot/templates/nautobot_ssot_infoblox/nautobot_ssot_infoblox_config.html b/nautobot_ssot/templates/nautobot_ssot_infoblox/nautobot_ssot_infoblox_config.html new file mode 100644 index 000000000..c47ffc6fb --- /dev/null +++ b/nautobot_ssot/templates/nautobot_ssot_infoblox/nautobot_ssot_infoblox_config.html @@ -0,0 +1,12 @@ +{% extends 'nautobot_ssot/config.html' %} +{% load helpers %} + +{% block title %}{{ block.super }} - Infoblox Configs{% endblock %} + +{% block content %} +
+
+ {% include 'utilities/obj_table.html' with table=infobloxconfig_table table_template='panel_table.html' heading='Infoblox Configs' %} +
+
+{% endblock content %} \ No newline at end of file diff --git a/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_changelog.html b/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_changelog.html new file mode 100644 index 000000000..76441438c --- /dev/null +++ b/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_changelog.html @@ -0,0 +1 @@ +{% extends 'generic/object_changelog.html' %} diff --git a/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_list.html b/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_list.html new file mode 100644 index 000000000..b3349e3e8 --- /dev/null +++ b/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_list.html @@ -0,0 +1,12 @@ +{% extends 'generic/object_list.html' %} +{% load helpers %} +{% load buttons %} + +{% block breadcrumbs %} +
  • SSOT Configs
  • +
  • SSOT Infoblox Configs
  • +{% endblock breadcrumbs %} + +{% block buttons %} + +{% endblock buttons %} diff --git a/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_retrieve.html b/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_retrieve.html new file mode 100644 index 000000000..8de7cb409 --- /dev/null +++ b/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_retrieve.html @@ -0,0 +1,140 @@ +{% extends 'generic/object_retrieve.html' %} +{% load helpers %} +{% load buttons %} + +{% block breadcrumbs %} +
  • SSOT Configs
  • +
  • SSOT Infoblox Configs
  • +
  • {{ object|hyperlinked_object }}
  • +{% endblock breadcrumbs %} + +{% block extra_buttons %} + +{% endblock extra_buttons %} + +{% block masthead %} +

    + {% block title %}{{ object }}{% endblock title %} +

    +{% endblock masthead %} + +{% block content_left_page %} +
    +
    + Infoblox Config +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Name{{ object.name }}
    Description{{ object.description|placeholder }}
    Infoblox Instance{{ object.infoblox_instance|hyperlinked_object }}
    Default Status for Imported Objects{{ object.default_status|hyperlinked_object }}
    Infoblox WAPI Version{{ object.infoblox_wapi_version|placeholder }}
    Can be used in Sync Job{{ object.job_enabled }}
    Enable Sync from Nautobot to Infoblox{{ object.enable_sync_to_infoblox }}
    Enable Sync from Infoblox to Nautobot{{ object.enable_sync_to_nautobot }}
    Import Networks{{ object.import_subnets }}
    Import IP Addresses{{ object.import_ip_addresses }}
    Import VLANs{{ object.import_vlans }}
    Import VLAN Views{{ object.import_vlan_views }}
    Import IPv4{{ object.import_ipv4 }}
    Import IPv6{{ object.import_ipv6 }}
    Infoblox - Fixed IP Address Type{{ object.fixed_address_type }}
    Infoblox - DNS record type{{ object.dns_record_type }}
    Infoblox - deletable models{{ object.infoblox_deletable_models }}
    Nautobot - deletable models{{ object.nautobot_deletable_models }}
    +
    +{% endblock %} + +{% block content_right_page %} +
    +
    + Infoblox Sync Filters +
    + + + + +
    + {% include 'extras/inc/json_data.html' with data=object.infoblox_sync_filters format="json" %} +
    +
    +
    +
    + Infoblox Network View to DNS View Mapping +
    + + + + +
    + {% include 'extras/inc/json_data.html' with data=object.infoblox_dns_view_mapping format="json" %} +
    +
    +
    +
    + Extensible Attributes/Custom Fields to Ignore +
    + + + + +
    + {% include 'extras/inc/json_data.html' with data=object.cf_fields_ignore format="json" %} +
    +
    +{% endblock %} \ No newline at end of file diff --git a/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_update.html b/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_update.html new file mode 100644 index 000000000..f8cc8219f --- /dev/null +++ b/nautobot_ssot/templates/nautobot_ssot_infoblox/ssotinfobloxconfig_update.html @@ -0,0 +1,52 @@ +{% extends 'generic/object_create.html' %} +{% load form_helpers %} + +{% block form %} +
    +
    Infoblox Config
    +
    + {% render_field form.name %} + {% render_field form.description %} + {% render_field form.infoblox_instance %} + {% render_field form.infoblox_wapi_version %} + {% render_field form.job_enabled %} + {% render_field form.enable_sync_to_infoblox %} + {% render_field form.enable_sync_to_nautobot %} + {% render_field form.import_ip_addresses %} + {% render_field form.import_subnets %} + {% render_field form.import_vlan_views %} + {% render_field form.import_vlans %} + {% render_field form.import_ipv4 %} + {% render_field form.import_ipv6 %} + {% render_field form.fixed_address_type %} + {% render_field form.dns_record_type %} + {% render_field form.infoblox_deletable_models %} + {% render_field form.nautobot_deletable_models %} + {% render_field form.default_status %} +
    +
    +
    +
    Data
    +
    + {% render_field form.infoblox_sync_filters %} +
    +
    +
    +
    Data
    +
    + {% render_field form.infoblox_dns_view_mapping %} +
    +
    +
    +
    Data
    +
    + {% render_field form.cf_fields_ignore %} +
    +
    +
    +
    Notes
    +
    + {% render_field form.object_note %} +
    +
    +{% endblock %} \ No newline at end of file diff --git a/nautobot_ssot/tests/aristacv/test_utils_nautobot.py b/nautobot_ssot/tests/aristacv/test_utils_nautobot.py index 5a5640fc4..026cba75e 100644 --- a/nautobot_ssot/tests/aristacv/test_utils_nautobot.py +++ b/nautobot_ssot/tests/aristacv/test_utils_nautobot.py @@ -16,6 +16,10 @@ class TestNautobotUtils(TestCase): databases = ("default", "job_logs") + def setUp(self): + """Configure shared test vars.""" + self.arista_manu = Manufacturer.objects.get_or_create(name="Arista")[0] + def test_verify_site_success(self): """Test the verify_site method for existing Site.""" loc_type = LocationType.objects.get_or_create(name="Site")[0] @@ -33,9 +37,7 @@ def test_verify_site_fail(self): def test_verify_device_type_object_success(self): """Test the verify_device_type_object for existing DeviceType.""" - new_dt, _ = DeviceType.objects.get_or_create( - model="DCS-7150S-24", manufacturer=Manufacturer.objects.get(name="Arista") - ) + new_dt, _ = DeviceType.objects.get_or_create(model="DCS-7150S-24", manufacturer=self.arista_manu) result = nautobot.verify_device_type_object(device_type="DCS-7150S-24") self.assertEqual(result, new_dt) diff --git a/nautobot_ssot/tests/contrib_base_classes.py b/nautobot_ssot/tests/contrib_base_classes.py index 28330f748..c694e85a4 100644 --- a/nautobot_ssot/tests/contrib_base_classes.py +++ b/nautobot_ssot/tests/contrib_base_classes.py @@ -33,9 +33,12 @@ def setUpTestData(cls): cls.device_role.content_types.set([ContentType.objects.get_for_model(dcim_models.Device)]) cls.manufacturer = dcim_models.Manufacturer.objects.create(name="Generic Inc.") cls.device_type = dcim_models.DeviceType.objects.create(model="Generic Switch", manufacturer=cls.manufacturer) + cls.location_type, created = dcim_models.LocationType.objects.get_or_create(name="Site") + if created: + cls.location_type.content_types.add(ContentType.objects.get_for_model(dcim_models.Device)) cls.location = 
dcim_models.Location.objects.create( name="Bremen", - location_type=dcim_models.LocationType.objects.get_or_create(name="Site")[0], + location_type=cls.location_type, status=cls.status_active, ) for name in ["sw01", "sw02"]: diff --git a/nautobot_ssot/tests/device42/unit/test_models_nautobot_ipam.py b/nautobot_ssot/tests/device42/unit/test_models_nautobot_ipam.py index f1e5a26e9..f5a39d73d 100644 --- a/nautobot_ssot/tests/device42/unit/test_models_nautobot_ipam.py +++ b/nautobot_ssot/tests/device42/unit/test_models_nautobot_ipam.py @@ -200,6 +200,7 @@ def setUp(self): status_reserved = Status.objects.get(name="Reserved") loc_type = LocationType.objects.get_or_create(name="Site")[0] loc_type.content_types.add(ContentType.objects.get_for_model(Device)) + loc_type.content_types.add(ContentType.objects.get_for_model(Prefix)) loc = Location.objects.get_or_create(name="Test Site", location_type=loc_type, status=self.status_active)[0] cisco_manu = Manufacturer.objects.get_or_create(name="Cisco")[0] csr1000v = DeviceType.objects.get_or_create(model="CSR1000v", manufacturer=cisco_manu)[0] @@ -420,8 +421,9 @@ def setUp(self): super().setUp() self.status_active = Status.objects.get(name="Active") - site_type = LocationType.objects.get(name="Site") + site_type = LocationType.objects.get_or_create(name="Site")[0] site_type.content_types.add(ContentType.objects.get_for_model(Device)) + site_type.content_types.add(ContentType.objects.get_for_model(VLAN)) self.test_site = Location.objects.create(name="HQ", location_type=site_type, status=self.status_active) self.diffsync = DiffSync() diff --git a/nautobot_ssot/tests/device42/unit/test_utils_device42.py b/nautobot_ssot/tests/device42/unit/test_utils_device42.py index 40ce4543a..1bdd6476c 100644 --- a/nautobot_ssot/tests/device42/unit/test_utils_device42.py +++ b/nautobot_ssot/tests/device42/unit/test_utils_device42.py @@ -164,7 +164,6 @@ def test_get_intf_status(self, name, sent, received): # pylint: disable=unused- ("iosxe", 
"iosxe", "cisco_ios"), ("iosxr", "iosxr", "cisco_xr"), ("nxos", "nxos", "cisco_nxos"), - ("bigip", "f5", "f5_tmsh"), ("junos", "junos", "juniper_junos"), ("dell", "dell", "dell"), ] diff --git a/nautobot_ssot/tests/device42/unit/test_utils_nautobot.py b/nautobot_ssot/tests/device42/unit/test_utils_nautobot.py index 087fb16a7..1e8e667e1 100644 --- a/nautobot_ssot/tests/device42/unit/test_utils_nautobot.py +++ b/nautobot_ssot/tests/device42/unit/test_utils_nautobot.py @@ -28,10 +28,13 @@ def setUp(self): super().setUp() self.status_active = Status.objects.get(name="Active") self.cisco_manu, _ = Manufacturer.objects.get_or_create(name="Cisco") + site_lt = LocationType.objects.get_or_create(name="Site")[0] + site_lt.content_types.add(ContentType.objects.get_for_model(Device)) + site_lt.content_types.add(ContentType.objects.get_for_model(VLAN)) self.site = Location.objects.create( name="Test Site", status=self.status_active, - location_type=LocationType.objects.get(name="Site"), + location_type=site_lt, ) self.site.validated_save() _dt = DeviceType.objects.create(model="CSR1000v", manufacturer=self.cisco_manu) @@ -154,7 +157,7 @@ def test_update_custom_fields_add_cf(self): test_site = Location.objects.create( name="Test", location_type=LocationType.objects.get_or_create(name="Site")[0], status=self.status_active ) - self.assertEqual(len(test_site.get_custom_fields()), 4) + self.assertEqual(len(test_site.get_custom_fields()), 0) mock_cfs = { "Test Custom Field": {"key": "Test Custom Field", "value": None, "notes": None}, } diff --git a/nautobot_ssot/tests/infoblox/fixtures/get_a_record_by_ref.json b/nautobot_ssot/tests/infoblox/fixtures/get_a_record_by_ref.json new file mode 100644 index 000000000..88371fd9a --- /dev/null +++ b/nautobot_ssot/tests/infoblox/fixtures/get_a_record_by_ref.json @@ -0,0 +1,6 @@ +{ + "_ref": "record:a/ZG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdC5uYXV0b2JvdCx0ZXN0ZG5zbmFtZSwxMC4wLjAuMQ:testdnsname.nautobot.test/default", + "ipv4addr": "10.0.0.1", + 
"name": "testdnsname.nautobot.test", + "view": "default" +} \ No newline at end of file diff --git a/nautobot_ssot/tests/infoblox/fixtures/get_all_network_views.json b/nautobot_ssot/tests/infoblox/fixtures/get_all_network_views.json new file mode 100644 index 000000000..05b99b369 --- /dev/null +++ b/nautobot_ssot/tests/infoblox/fixtures/get_all_network_views.json @@ -0,0 +1,35 @@ +[ + { + "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true", + "associated_dns_views": [ + "default" + ], + "extattrs": { + + }, + "is_default": true, + "name": "default" + }, + { + "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQx:prod/false", + "associated_dns_views": [ + "default.prod" + ], + "extattrs": { + + }, + "is_default": false, + "name": "prod" + }, + { + "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQy:dev/false", + "associated_dns_views": [ + "default.dev" + ], + "extattrs": { + + }, + "is_default": false, + "name": "dev" + } +] \ No newline at end of file diff --git a/nautobot_ssot/tests/infoblox/fixtures/get_authoritative_zones_for_dns_view.json b/nautobot_ssot/tests/infoblox/fixtures/get_authoritative_zones_for_dns_view.json new file mode 100644 index 000000000..3eab1e3ac --- /dev/null +++ b/nautobot_ssot/tests/infoblox/fixtures/get_authoritative_zones_for_dns_view.json @@ -0,0 +1,14 @@ +{ + "result": [ + { + "_ref": "zone_auth/ZG5zLnpvbmUkLjIuYXJwYS5pbi1hZGRy:0.0.0.0%2F0/default.dev", + "fqdn": "0.0.0.0/0", + "view": "default.dev" + }, + { + "_ref": "zone_auth/ZG5zLnpvbmUkLjIudGVzdC5sb2NhbC5uYXV0b2JvdA:nautobot.local.test/default.dev", + "fqdn": "nautobot.local.test", + "view": "default.dev" + } + ] +} \ No newline at end of file diff --git a/nautobot_ssot/tests/infoblox/fixtures/get_fixed_address_by_ref.json b/nautobot_ssot/tests/infoblox/fixtures/get_fixed_address_by_ref.json new file mode 100644 index 000000000..b3d8027f8 --- /dev/null +++ b/nautobot_ssot/tests/infoblox/fixtures/get_fixed_address_by_ref.json @@ -0,0 +1,11 @@ +{ + "_ref": 
"fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.2/dev", + "extattrs": { + + }, + "ipv4addr": "10.0.0.2", + "mac": "52:1f:83:d4:9a:2e", + "name": "host-fixed1", + "network": "10.0.0.0/24", + "network_view": "dev" +} \ No newline at end of file diff --git a/nautobot_ssot/tests/infoblox/fixtures/get_host_by_ref.json b/nautobot_ssot/tests/infoblox/fixtures/get_host_by_ref.json new file mode 100644 index 000000000..f04532ddd --- /dev/null +++ b/nautobot_ssot/tests/infoblox/fixtures/get_host_by_ref.json @@ -0,0 +1,15 @@ +{ + "_ref": "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LnRlc3QudGVzdGRldmljZTE:testdevice1.test/default", + "ipv4addr": "10.220.0.101", + "ipv4addrs": [ + { + "_ref": "record:host_ipv4addr/ZG5zLmhvc3RfYWRkcmVzcyQuX2RlZmF1bHQudGVzdC50ZXN0ZGV2aWNlMS4xMC4yMjAuMC4xMDEu:10.220.0.101/testdevice1.test/default", + "configure_for_dhcp": true, + "host": "testdevice1.test", + "ipv4addr": "10.220.0.101", + "mac": "11:11:11:11:11:11" + } + ], + "name": "testdevice1.test", + "view": "default" +} diff --git a/nautobot_ssot/tests/infoblox/fixtures/get_network_view.json b/nautobot_ssot/tests/infoblox/fixtures/get_network_view.json new file mode 100644 index 000000000..8f032a17f --- /dev/null +++ b/nautobot_ssot/tests/infoblox/fixtures/get_network_view.json @@ -0,0 +1,13 @@ +[ + { + "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQy:dev/false", + "associated_dns_views": [ + "default.dev" + ], + "extattrs": { + + }, + "is_default": false, + "name": "dev" + } +] \ No newline at end of file diff --git a/nautobot_ssot/tests/infoblox/fixtures/get_ptr_record_by_ip.json b/nautobot_ssot/tests/infoblox/fixtures/get_ptr_record_by_ip.json new file mode 100644 index 000000000..0acc7e1dc --- /dev/null +++ b/nautobot_ssot/tests/infoblox/fixtures/get_ptr_record_by_ip.json @@ -0,0 +1,16 @@ +{ + "result": [ + { + "_ref": "record:ptr/ZG5zLmJpbmRfcHRyJC4yLmFycGEuaW4tYWRkci4xMC4wLjAuMS5ob3N0MS5uYXV0b2JvdC5sb2NhbC50ZXN0:1.0.0.10.in-addr.arpa/default.dev", + "extattrs": { + + }, + 
"ipv4addr": "10.0.0.1", + "ipv6addr": "", + "name": "1.0.0.10.in-addr.arpa", + "ptrdname": "host1.nautobot.local.test", + "view": "default.dev", + "zone": "in-addr.arpa" + } + ] +} \ No newline at end of file diff --git a/nautobot_ssot/tests/infoblox/fixtures/get_ptr_record_by_ref.json b/nautobot_ssot/tests/infoblox/fixtures/get_ptr_record_by_ref.json new file mode 100644 index 000000000..9b4580ea9 --- /dev/null +++ b/nautobot_ssot/tests/infoblox/fixtures/get_ptr_record_by_ref.json @@ -0,0 +1,8 @@ +{ + "_ref": "record:ptr/ZG5zLmJpbmRfcHRyJC4yLmFycGEuaW4tYWRkci4xMC4wLjAuMS5ob3N0MS5uYXV0b2JvdC5sb2NhbC50ZXN0:1.0.0.10.in-addr.arpa/default.dev", + "ipv4addr": "10.0.0.1", + "ipv6addr": "", + "name": "1.0.0.10.in-addr.arpa", + "ptrdname": "host1.nautobot.local.test", + "view": "default.dev" +} \ No newline at end of file diff --git a/nautobot_ssot/tests/infoblox/fixtures_infoblox.py b/nautobot_ssot/tests/infoblox/fixtures_infoblox.py index 23f2b50eb..f6594afa3 100644 --- a/nautobot_ssot/tests/infoblox/fixtures_infoblox.py +++ b/nautobot_ssot/tests/infoblox/fixtures_infoblox.py @@ -5,7 +5,23 @@ import json import os - +from django.contrib.contenttypes.models import ContentType +from nautobot.extras.choices import ( + RelationshipTypeChoices, + SecretsGroupAccessTypeChoices, + SecretsGroupSecretTypeChoices, +) +from nautobot.extras.models import ( + ExternalIntegration, + Relationship, + Secret, + SecretsGroup, + SecretsGroupAssociation, + Status, +) +from nautobot.ipam.models import VLAN, IPAddress, Prefix, VLANGroup + +from nautobot_ssot.integrations.infoblox.models import SSOTInfobloxConfig from nautobot_ssot.integrations.infoblox.utils import client FIXTURES = os.environ.get("FIXTURE_DIR", "nautobot_ssot/tests/infoblox/fixtures") @@ -19,13 +35,99 @@ def _json_read_fixture(name): return json.load(fixture) +def create_default_infoblox_config(infoblox_url="infoblox.example.com"): + default_status, _ = Status.objects.get_or_create(name="Active") + for model in [IPAddress, 
Prefix, VLAN, VLANGroup]: + default_status.content_types.add(ContentType.objects.get_for_model(model)) + infoblox_sync_filters = [{"network_view": "default"}] + secrets_group, _ = SecretsGroup.objects.get_or_create(name="InfobloxSSOTUnitTesting") + infoblox_username, _ = Secret.objects.get_or_create( + name="Infoblox Username - Unit Testing", + defaults={ + "provider": "environment-variable", + "parameters": {"variable": "NAUTOBOT_SSOT_INFOBLOX_USERNAME"}, + }, + ) + infoblox_password, _ = Secret.objects.get_or_create( + name="Infoblox Password - Unit Testing", + defaults={ + "provider": "environment-variable", + "parameters": {"variable": "NAUTOBOT_SSOT_INFOBLOX_PASSWORD"}, + }, + ) + SecretsGroupAssociation.objects.get_or_create( + secrets_group=secrets_group, + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME, + defaults={ + "secret": infoblox_username, + }, + ) + SecretsGroupAssociation.objects.get_or_create( + secrets_group=secrets_group, + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD, + defaults={ + "secret": infoblox_password, + }, + ) + external_integration, _ = ExternalIntegration.objects.get_or_create( + name="InfobloxUnitTestingInstance", + remote_url=infoblox_url, + secrets_group=secrets_group, + verify_ssl=True, + timeout=60, + ) + + config, _ = SSOTInfobloxConfig.objects.get_or_create( + name="InfobloxUnitTestConfig", + defaults=dict( # pylint: disable=use-dict-literal + description="Unit Test Config.", + default_status=default_status, + infoblox_wapi_version="v2.12", + infoblox_instance=external_integration, + enable_sync_to_infoblox=True, + import_ip_addresses=True, + import_subnets=True, + import_vlan_views=True, + import_vlans=True, + import_ipv4=True, + import_ipv6=True, + job_enabled=True, + infoblox_sync_filters=infoblox_sync_filters, + ), + ) + + return config + + def localhost_client_infoblox(localhost_url): 
"""Return InfobloxAPI client for testing.""" return client.InfobloxApi( # nosec - url=localhost_url, username="test-user", password="test-password", verify_ssl=False, cookie=None + url=localhost_url, + username="test-user", + password="test-password", + verify_ssl=False, + wapi_version="v2.12", + timeout=60, + cookie=None, ) +def create_prefix_relationship(): + """Create Relationship for Prefix -> VLAN.""" + relationship_dict = { # pylint: disable=duplicate-code + "label": "Prefix -> VLAN", + "key": "prefix_to_vlan", + "type": RelationshipTypeChoices.TYPE_ONE_TO_MANY, + "source_type": ContentType.objects.get_for_model(Prefix), + "source_label": "Prefix", + "destination_type": ContentType.objects.get_for_model(VLAN), + "destination_label": "VLAN", + } + return Relationship.objects.get_or_create(label=relationship_dict["label"], defaults=relationship_dict)[0] + + def get_all_ipv4address_networks(): """Return all IPv4Address networks.""" return _json_read_fixture("get_all_ipv4address_networks.json") @@ -61,11 +163,21 @@ def create_host_record(): return _json_read_fixture("create_host_record.json") +def get_fixed_address_by_ref(): + """Return a get Fixed Address by ref response.""" + return _json_read_fixture("get_fixed_address_by_ref.json") + + def get_host_by_ip(): """Return a get Host by IP response.""" return _json_read_fixture("get_host_by_ip.json") +def get_host_by_ref(): + """Return a get Host by ref response.""" + return _json_read_fixture("get_host_by_ref.json") + + def get_a_record_by_ip(): """Return a get A record by IP response.""" return _json_read_fixture("get_a_record_by_ip.json") @@ -76,6 +188,11 @@ def get_a_record_by_name(): return _json_read_fixture("get_a_record_by_name.json") +def get_a_record_by_ref(): + """Return a get A record by ref response.""" + return _json_read_fixture("get_a_record_by_ref.json") + + def get_host_record_by_name(): """Return a get Host record by name response.""" return _json_read_fixture("get_host_record_by_name.json") @@ 
-106,16 +223,31 @@ def get_authoritative_zone(): return _json_read_fixture("get_authoritative_zone.json") +def get_authoritative_zones_for_dns_view(): + """Return a get authoritative zones for view response.""" + return _json_read_fixture("get_authoritative_zones_for_dns_view.json") + + def find_network_reference(): """Return a find network reference response.""" return _json_read_fixture("find_network_reference.json") +def get_ptr_record_by_ip(): + """Return a get PTR record by IP response.""" + return _json_read_fixture("get_ptr_record_by_ip.json") + + def get_ptr_record_by_name(): """Return a get PTR record by name response.""" return _json_read_fixture("get_ptr_record_by_name.json") +def get_ptr_record_by_ref(): + """Return a get PTR record by ref response.""" + return _json_read_fixture("get_ptr_record_by_ref.json") + + def find_next_available_ip(): """Return a next available IP response.""" return _json_read_fixture("find_next_available_ip.json") @@ -136,6 +268,16 @@ def get_network_containers_ipv6(): return _json_read_fixture("get_network_containers_ipv6.json") +def get_all_network_views(): + """Return a all_network_views response.""" + return _json_read_fixture("get_all_network_views.json") + + +def get_network_view(): + """Return a get_network_view response.""" + return _json_read_fixture("get_network_view.json") + + def get_all_ranges(): """Return a get all ranges response.""" return _json_read_fixture("get_all_ranges.json") diff --git a/nautobot_ssot/tests/infoblox/test_client.py b/nautobot_ssot/tests/infoblox/test_client.py index 82f652d8d..aa04fb03a 100644 --- a/nautobot_ssot/tests/infoblox/test_client.py +++ b/nautobot_ssot/tests/infoblox/test_client.py @@ -2,42 +2,50 @@ # pylint: disable=protected-access # pylint: disable=too-many-public-methods +import unittest from collections import namedtuple from os import path - -import unittest from unittest.mock import patch -from requests.models import HTTPError + import requests_mock +from requests.models 
import HTTPError from nautobot_ssot.integrations.infoblox.utils.client import InvalidUrlScheme, get_dns_name from .fixtures_infoblox import ( - get_ptr_record_by_name, - localhost_client_infoblox, - get_all_ipv4address_networks, - get_all_ipv4address_networks_medium, - get_all_ipv4address_networks_large, - get_all_ipv4address_networks_bulk, - create_ptr_record, + LOCALHOST, create_a_record, create_host_record, - get_host_by_ip, + create_ptr_record, + find_network_reference, + find_next_available_ip, get_a_record_by_ip, get_a_record_by_name, - get_host_record_by_name, + get_a_record_by_ref, get_all_dns_views, - get_dhcp_lease_from_ipv4, - get_dhcp_lease_from_hostname, + get_all_ipv4address_networks, + get_all_ipv4address_networks_bulk, + get_all_ipv4address_networks_large, + get_all_ipv4address_networks_medium, + get_all_network_views, get_all_ranges, get_all_subnets, get_authoritative_zone, + get_authoritative_zones_for_dns_view, + get_dhcp_lease_from_hostname, + get_dhcp_lease_from_ipv4, + get_fixed_address_by_ref, + get_host_by_ip, + get_host_by_ref, + get_host_record_by_name, get_network_containers, get_network_containers_ipv6, - find_network_reference, - find_next_available_ip, + get_network_view, + get_ptr_record_by_ip, + get_ptr_record_by_name, + get_ptr_record_by_ref, + localhost_client_infoblox, search_ipv4_address, - LOCALHOST, ) Origin = namedtuple("Origin", ["name", "slug"]) @@ -190,6 +198,29 @@ def test_get_all_ipv4_address_networks_bulk_data_success(self): resp = self.infoblox_client.get_all_ipv4address_networks(prefixes=prefixes) self.assertEqual(resp, get_all_ipv4address_networks_bulk()[0]) + def test_get_fixed_address_by_ref_success(self): + """Test get_fixed_address_by_ref success.""" + mock_ref = "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.2/dev" + mock_response = get_fixed_address_by_ref() + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_ref}", json=mock_response, status_code=200) + resp = 
self.infoblox_client.get_fixed_address_by_ref(mock_ref) + + self.assertEqual(resp, mock_response) + + def test_get_fixed_address_by_ref_fail(self): + """Test get_fixed_address_by_ref fail.""" + mock_ref = "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.2/dev" + mock_response = "" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_ref}", json=mock_response, status_code=404) + with self.assertRaises(HTTPError) as context: + self.infoblox_client.get_fixed_address_by_ref(mock_ref) + + self.assertEqual(context.exception.response.status_code, 404) + def test_get_host_record_by_name_success(self): """Test get_host_by_record success.""" mock_fqdn = "test.fqdn.com" @@ -240,6 +271,29 @@ def test_get_host_record_by_ip_fail(self): self.assertEqual(context.exception.response.status_code, 404) + def test_get_host_record_by_ref_success(self): + """Test get_host_record_by_ref success.""" + mock_ref = "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LnRlc3QudGVzdGRldmljZTE:testdevice1.test/default" + mock_response = get_host_by_ref() + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_ref}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_host_record_by_ref(mock_ref) + + self.assertEqual(resp, mock_response) + + def test_get_host_record_by_ref_fail(self): + """Test get_host_record_by_ref fail.""" + mock_ref = "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LnRlc3QudGVzdGRldmljZTE:testdevice1.test/default" + mock_response = "" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_ref}", json=mock_response, status_code=404) + with self.assertRaises(HTTPError) as context: + self.infoblox_client.get_host_record_by_ref(mock_ref) + + self.assertEqual(context.exception.response.status_code, 404) + def test_get_a_record_by_name_success(self): """Test get_a_record_by_name success.""" mock_fqdn = "test.fqdn.com" @@ -275,7 +329,7 @@ def test_get_a_record_by_ip_success(self): req.get(f"{LOCALHOST}/{mock_uri}", 
json=mock_response, status_code=200) resp = self.infoblox_client.get_a_record_by_ip(mock_ip) - self.assertEqual(resp, mock_response["result"]) + self.assertEqual(resp, mock_response["result"][0]) def test_get_a_record_by_ip_fail(self): """Test get_a_record_by_ip fail.""" @@ -290,6 +344,33 @@ def test_get_a_record_by_ip_fail(self): self.assertEqual(context.exception.response.status_code, 404) + def test_get_a_record_by_ref_success(self): + """Test get_a_record_by_ref success.""" + mock_ref = ( + "record:a/ZG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdCx0ZXN0ZGV2aWNlMSwxMC4yMjAuMC4xMDE:testdevice1.test/default" + ) + mock_response = get_a_record_by_ref() + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_ref}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_a_record_by_ref(mock_ref) + + self.assertEqual(resp, mock_response) + + def test_get_a_record_by_ref_fail(self): + """Test get_a_record_by_ref fail.""" + mock_ref = ( + "record:a/aG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdCx0ZXN0ZGV2aWNlMSwxMC4yMjAuMC4xMDE:testdevice1.test/default" + ) + mock_response = "" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_ref}", json=mock_response, status_code=404) + with self.assertRaises(HTTPError) as context: + self.infoblox_client.get_a_record_by_ref(mock_ref) + + self.assertEqual(context.exception.response.status_code, 404) + def test_get_all_dns_views_success(self): """Test get_all_dns_views success.""" mock_response = get_all_dns_views() @@ -536,7 +617,7 @@ def test_find_next_available_ip_success(self, mock_find_network_reference): req.post(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=200) next_ip = self.infoblox_client.find_next_available_ip(test_network) - print(next_ip) + self.assertEqual(next_ip, "10.220.0.1") @patch("nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi._find_network_reference") @@ -659,6 +740,58 @@ def test_get_ptr_record_by_name_fail(self): 
self.assertEqual(context.exception.response.status_code, 404) + def test_get_ptr_record_by_ip_success(self): + """Test get_ptr_record_by_ip success.""" + mock_ip = "10.0.0.1" + mock_response = get_ptr_record_by_ip() + mock_uri = "record:ptr" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_ptr_record_by_name(mock_ip) + + self.assertEqual(resp, mock_response["result"]) + + def test_get_ptr_record_by_ip_fail(self): + """Test get_ptr_record_by_ip success.""" + mock_ip = "10.0.0.2" + mock_response = "" + mock_uri = "record:ptr" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=404) + with self.assertRaises(HTTPError) as context: + self.infoblox_client.get_ptr_record_by_ip(mock_ip) + + self.assertEqual(context.exception.response.status_code, 404) + + def test_get_ptr_record_by_ref_success(self): + """Test get_ptr_record_by_ref success.""" + mock_ref = ( + "record:a/ZG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdCx0ZXN0ZGV2aWNlMSwxMC4yMjAuMC4xMDE:testdevice1.test/default" + ) + mock_response = get_ptr_record_by_ref() + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_ref}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_ptr_record_by_ref(mock_ref) + + self.assertEqual(resp, mock_response) + + def test_get_ptr_record_by_ref_fail(self): + """Test get_ptr_record_by_ref fail.""" + mock_ref = ( + "record:a/aG5zLmJpbmRfYSQuX2RlZmF1bHQudGVzdCx0ZXN0ZGV2aWNlMSwxMC4yMjAuMC4xMDE:testdevice1.test/default" + ) + mock_response = "" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_ref}", json=mock_response, status_code=404) + with self.assertRaises(HTTPError) as context: + self.infoblox_client.get_ptr_record_by_ref(mock_ref) + + self.assertEqual(context.exception.response.status_code, 404) + def test_search_ipv4_address_success(self): """Test search_ipv4_address success.""" mock_ip 
= "10.223.0.42" @@ -705,3 +838,90 @@ def test_get_network_containers_ipv6(self): resp = self.infoblox_client.get_network_containers(ipv6=True) self.assertEqual(resp, mock_response["result"]) + + def test_get_network_views_success(self): + """Test get_network_views.""" + mock_response = get_all_network_views() + mock_uri = "networkview" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_network_views() + + self.assertEqual(resp, mock_response) + + def test_get_network_view_success(self): + """Test get_network_view success.""" + mock_name = "dev" + mock_response = get_network_view() + mock_uri = "networkview" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_network_view(mock_name) + + self.assertEqual(resp, mock_response) + + def test_get_network_view_fail(self): + """Test get_ptr_record_by_ref fail.""" + mock_name = "dev" + mock_response = "" + mock_uri = "networkview" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=404) + resp = self.infoblox_client.get_network_view(mock_name) + + self.assertEqual(resp, []) + + def test_get_default_dns_view_for_network_view(self): + """Test get_default_dns_view_for_network_view success.""" + mock_name = "dev" + mock_response = get_network_view() + mock_uri = "networkview" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_default_dns_view_for_network_view(mock_name) + + self.assertEqual(resp, "default.dev") + + def test_get_dns_view_for_network_view_from_default(self): + """Test get_dns_view_for_network_view using default view.""" + mock_name = "dev" + mock_response = get_network_view() + mock_uri = "networkview" + + with requests_mock.Mocker() as req: + 
req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_dns_view_for_network_view(mock_name) + + self.assertEqual(resp, "default.dev") + + def test_get_dns_view_for_network_view_from_config(self): + """Test get_dns_view_for_network_view using configured mapping.""" + mock_name = "dev" + mock_network_view_to_dns_map = {"dev": "dev-view"} + mock_response = get_network_view() + mock_uri = "networkview" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=200) + with unittest.mock.patch.object( + self.infoblox_client, "network_view_to_dns_map", mock_network_view_to_dns_map + ): + resp = self.infoblox_client.get_dns_view_for_network_view(mock_name) + + self.assertEqual(resp, "dev-view") + + def test_get_authoritative_zones_for_dns_view(self): + """Test get_authoritative_zones_for_dns_view.""" + mock_view = "dev" + mock_response = get_authoritative_zones_for_dns_view() + mock_uri = "zone_auth" + + with requests_mock.Mocker() as req: + req.get(f"{LOCALHOST}/{mock_uri}", json=mock_response, status_code=200) + resp = self.infoblox_client.get_authoritative_zones_for_dns_view(mock_view) + + self.assertEqual(resp, mock_response["result"]) diff --git a/nautobot_ssot/tests/infoblox/test_infoblox_adapter.py b/nautobot_ssot/tests/infoblox/test_infoblox_adapter.py index a2a8112ef..48f23252f 100644 --- a/nautobot_ssot/tests/infoblox/test_infoblox_adapter.py +++ b/nautobot_ssot/tests/infoblox/test_infoblox_adapter.py @@ -2,16 +2,17 @@ import unittest -from nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox import ( - InfobloxAdapter, - PLUGIN_CFG, -) +from nautobot_ssot.integrations.infoblox.choices import FixedAddressTypeChoices +from nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox import InfobloxAdapter + +from .fixtures_infoblox import create_default_infoblox_config class TestInfobloxAdapter(unittest.TestCase): """Test cases for InfobloxAdapter.""" 
def setUp(self): + self.config = create_default_infoblox_config() with unittest.mock.patch( "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True ) as mock_client: @@ -19,9 +20,9 @@ def setUp(self): job=unittest.mock.Mock(), sync=unittest.mock.Mock(), conn=mock_client, + config=self.config, ) - @unittest.mock.patch.dict(PLUGIN_CFG, [("infoblox_import_subnets", [])]) @unittest.mock.patch( "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_default_ext_attrs", autospec=True, @@ -81,24 +82,24 @@ def test_load_prefixes_no_infoblox_import_subnets( "ranges": [], }, ] - self.infoblox_adapter.load_prefixes() + sync_filters = [{"network_view": "default"}] + self.infoblox_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) self.infoblox_adapter.conn.get_tree_from_container.assert_not_called() mock_default_extra_attrs.assert_called_once() self.assertEqual(mock_extra_attr_dict.call_count, 4) mock_build_vlan_map.assert_called_once() self.assertEqual(len(self.infoblox_adapter.get_all("prefix")), 4) - self.infoblox_adapter.conn.get_network_containers.assert_has_calls([unittest.mock.call()]) - self.infoblox_adapter.conn.get_all_subnets.assert_has_calls([unittest.mock.call()]) - subnet_with_attrs = self.infoblox_adapter.get("prefix", "10.0.0.0/23") + self.infoblox_adapter.conn.get_network_containers.assert_has_calls([unittest.mock.call(network_view="default")]) + self.infoblox_adapter.conn.get_all_subnets.assert_has_calls([unittest.mock.call(network_view="default")]) + subnet_with_attrs = self.infoblox_adapter.get("prefix", "10.0.0.0/23__Global") self.assertEqual(subnet_with_attrs.ext_attrs, {"attr1": "data", "attr2": "value"}) self.assertEqual(subnet_with_attrs.vlans, {10: {"vid": 10, "name": "ten", "group": "group_a"}}) self.assertEqual(subnet_with_attrs.ranges, ["10.0.0.150-10.0.0.254", "10.0.1.150-10.0.1.254"]) - subnet_without_attrs = self.infoblox_adapter.get("prefix", "10.0.100.0/24") + 
subnet_without_attrs = self.infoblox_adapter.get("prefix", "10.0.100.0/24__Global") self.assertEqual(subnet_without_attrs.ext_attrs, {"attr1": "data"}) self.assertEqual(subnet_without_attrs.vlans, {}) self.assertEqual(subnet_without_attrs.ranges, []) - @unittest.mock.patch.dict(PLUGIN_CFG, [("infoblox_import_subnets", ["10.0.0.0/8", "192.168.0.0/16"])]) @unittest.mock.patch( "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_default_ext_attrs", autospec=True, @@ -163,25 +164,29 @@ def test_load_prefixes_with_infoblox_import_subnets( }, ] self.infoblox_adapter.conn.get_all_subnets.side_effect = [one_nine_two_network] - self.infoblox_adapter.conn.remove_duplicates.side_effect = [ten_network + one_nine_two_network, ten_container] - self.infoblox_adapter.load_prefixes() + sync_filters = [{"network_view": "default", "prefixes_ipv4": ["10.0.0.0/8", "192.168.0.0/16"]}] + self.infoblox_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) self.infoblox_adapter.conn.get_tree_from_container.assert_has_calls( - [unittest.mock.call("10.0.0.0/8"), unittest.mock.call("192.168.0.0/16")] + [ + unittest.mock.call(root_container="10.0.0.0/8", network_view="default"), + unittest.mock.call(root_container="192.168.0.0/16", network_view="default"), + ] ) self.assertEqual(self.infoblox_adapter.conn.get_tree_from_container.call_count, 2) self.infoblox_adapter.conn.get_child_subnets_from_container.assert_has_calls( - [unittest.mock.call(prefix="10.0.0.0/8"), unittest.mock.call(prefix="10.0.0.0/16")] + [ + unittest.mock.call(prefix="10.0.0.0/8", network_view="default"), + unittest.mock.call(prefix="10.0.0.0/16", network_view="default"), + ] ) self.assertEqual(self.infoblox_adapter.conn.get_child_subnets_from_container.call_count, 2) self.infoblox_adapter.conn.get_all_subnets.assert_called_once() - self.infoblox_adapter.conn.get_all_subnets.assert_called_with("192.168.0.0/16") - 
self.assertEqual(self.infoblox_adapter.conn.remove_duplicates.call_count, 2) + self.infoblox_adapter.conn.get_all_subnets.assert_called_with("192.168.0.0/16", network_view="default") mock_default_extra_attrs.assert_called_once() self.assertEqual(mock_extra_attr_dict.call_count, 4) mock_build_vlan_map.assert_not_called() self.assertEqual(len(self.infoblox_adapter.get_all("prefix")), 4) - @unittest.mock.patch.dict(PLUGIN_CFG, [("infoblox_import_subnets", [])]) @unittest.mock.patch( "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_default_ext_attrs", autospec=True, @@ -223,21 +228,15 @@ def test_load_prefixes_add_duplicate_prefix( "ranges": [], }, ] - error_message = ( - "Duplicate prefix found: 10.0.0.0/23. Duplicate prefixes are not supported, " - "and only the first occurrence will be included in the sync. To load data " - "from a single Network View, use the 'infoblox_network_view' setting." - ) - self.infoblox_adapter.load_prefixes() + error_message = "Duplicate prefix found: 10.0.0.0/23__Global." 
+ sync_filters = [{"network_view": "default"}] + self.infoblox_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) self.infoblox_adapter.job.logger.warning.assert_called_once() self.infoblox_adapter.job.logger.warning.assert_called_with(error_message) mock_build_vlan_map.assert_not_called() self.assertEqual(mock_extra_attr_dict.call_count, 2) mock_default_extra_attrs.assert_called_once() - @unittest.mock.patch.dict( - PLUGIN_CFG, [("infoblox_import_subnets", []), ("infoblox_import_objects_subnets_ipv6", True)] - ) @unittest.mock.patch( "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_default_ext_attrs", autospec=True, @@ -322,19 +321,353 @@ def test_load_prefixes_ipv6_subnets( }, ], ] - self.infoblox_adapter.load_prefixes() + sync_filters = [{"network_view": "default"}] + self.infoblox_adapter.load_prefixes(include_ipv4=True, include_ipv6=True, sync_filters=sync_filters) self.infoblox_adapter.conn.get_tree_from_container.assert_not_called() mock_default_extra_attrs.assert_called_once() self.assertEqual(mock_extra_attr_dict.call_count, 6) mock_build_vlan_map.assert_called_once() self.assertEqual(len(self.infoblox_adapter.get_all("prefix")), 6) self.infoblox_adapter.conn.get_network_containers.assert_has_calls( - [unittest.mock.call(), unittest.mock.call(ipv6=True)] + [unittest.mock.call(network_view="default"), unittest.mock.call(network_view="default", ipv6=True)] ) self.infoblox_adapter.conn.get_all_subnets.assert_has_calls( - [unittest.mock.call(), unittest.mock.call(ipv6=True)] + [unittest.mock.call(network_view="default"), unittest.mock.call(network_view="default", ipv6=True)] ) - ipv6_subnet = self.infoblox_adapter.get("prefix", "2001:5b0:4100::/40") + ipv6_subnet = self.infoblox_adapter.get("prefix", "2001:5b0:4100::/40__Global") self.assertEqual(ipv6_subnet.ext_attrs, {"attr1": "data"}) self.assertEqual(ipv6_subnet.vlans, {}) self.assertEqual(ipv6_subnet.ranges, []) + + @unittest.mock.patch( + 
"nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_default_ext_attrs", + autospec=True, + return_value={}, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_ext_attr_dict", + autospec=True, + side_effect=[{}], + ) + def test_load_ip_addresses_fixed_only( + self, + mock_extra_attr_dict, + mock_default_extra_attrs, + ): + """Test loading IP Addresses with one fixed address only.""" + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + infoblox_adapter = InfobloxAdapter( + job=unittest.mock.Mock(), + sync=unittest.mock.Mock(), + conn=mock_client, + config=self.config, + ) + infoblox_adapter.conn.get_ipaddr_status.return_value = "Active" + infoblox_adapter.conn.get_all_ipv4address_networks.side_effect = [ + [ + { + "_ref": "ipv4address/Li5pcHY0X2FkZHJlc3MkMTAuMjIwLjAuMTAwLzA:10.220.0.100", + "extattrs": {"Usage": {"value": "TACACS"}}, + "ip_address": "10.0.0.2", + "is_conflict": "false", + "lease_state": "FREE", + "mac_address": "", + "names": [], + "network": "10.0.0.0/24", + "network_view": "dev", + "objects": ["fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.2/dev"], + "status": "USED", + "types": ["RESERVATION"], + "usage": ["DHCP"], + }, + ] + ] + infoblox_adapter.conn.get_fixed_address_by_ref.return_value = { + "_ref": "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.2/dev", + "ipv4addr": "10.0.0.2", + "extattrs": {}, + "name": "fa-server1", + "comment": "fa server", + "network": "10.0.0.0/24", + "network_view": "dev", + } + infoblox_adapter.load_ipaddresses() + ip_address = infoblox_adapter.get( + "ipaddress", + {"address": "10.0.0.2", "prefix": "10.0.0.0/24", "prefix_length": 24, "namespace": "dev"}, + ) + + self.assertEqual("10.0.0.2", ip_address.address) + self.assertEqual("10.0.0.0/24", ip_address.prefix) + self.assertEqual(24, 
ip_address.prefix_length) + self.assertEqual("dev", ip_address.namespace) + self.assertEqual("fa-server1", ip_address.description) + self.assertEqual("dhcp", ip_address.ip_addr_type) + self.assertEqual({}, ip_address.ext_attrs) + self.assertEqual("", ip_address.mac_address) + self.assertEqual("fa server", ip_address.fixed_address_comment) + self.assertEqual(False, ip_address.has_a_record) + self.assertEqual(False, ip_address.has_ptr_record) + self.assertEqual(False, ip_address.has_host_record) + + mock_default_extra_attrs.assert_called_once() + self.assertEqual(mock_extra_attr_dict.call_count, 1) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_default_ext_attrs", + autospec=True, + return_value={}, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_ext_attr_dict", + autospec=True, + side_effect=[{}, {}, {}], + ) + def test_load_ip_addresses_fixed_dns_a_dns_ptr( # pylint: disable=too-many-statements + self, + mock_extra_attr_dict, + mock_default_extra_attrs, + ): + """Test loading IP Addresses with one fixed address, one A record and one PTR record.""" + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + infoblox_adapter = InfobloxAdapter( + job=unittest.mock.Mock(), + sync=unittest.mock.Mock(), + conn=mock_client, + config=self.config, + ) + infoblox_adapter.conn.get_ipaddr_status.return_value = "Active" + infoblox_adapter.conn.get_all_ipv4address_networks.side_effect = [ + [ + { + "_ref": "ipv4address/Li5pcHY0X2FkZHJlc3MkMTAuMC4wLjQvMg:10.0.0.4/dev", + "ip_address": "10.0.0.4", + "is_conflict": "false", + "mac_address": "", + "names": ["fa1 add", "server11.nautobot.local.test"], + "network": "10.0.0.0/24", + "network_view": "dev", + "objects": [ + "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.4/dev", + 
"record:a/ZG5zLmJpbmRfYSQuMi50ZXN0LmxvY2FsLm5hdXRvYm90LHNlcnZlcjExLDEwLjAuMC40:server11.nautobot.local.test/default.dev", + "record:ptr/ZG5zLmJpbmRfcHRyJC4yLmFycGEuaW4tYWRkci4xMC4wLjAuNC5zZXJ2ZXIxMS5uYXV0b2JvdC5sb2NhbC50ZXN0:4.0.0.10.in-addr.arpa/default.dev", + ], + "status": "USED", + "types": ["RESERVATION", "A", "PTR"], + "usage": ["DHCP", "DNS"], + } + ] + ] + infoblox_adapter.conn.get_fixed_address_by_ref.return_value = { + "_ref": "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.4/dev", + "ipv4addr": "10.0.0.4", + "extattrs": {}, + "name": "fa-server1", + "comment": "fa server", + "network": "10.0.0.0/24", + "network_view": "dev", + } + infoblox_adapter.conn.get_a_record_by_ref.return_value = { + "_ref": "record:a/ZG5zLmJpbmRfYSQuMi50ZXN0LmxvY2FsLm5hdXRvYm90LHNlcnZlcjExLDEwLjAuMC40:server11.nautobot.local.test/default.dev", + "ipv4addr": "10.0.0.4", + "name": "server11.nautobot.local.test", + "comment": "a record comment", + "view": "default", + } + infoblox_adapter.conn.get_ptr_record_by_ref.return_value = { + "_ref": "record:ptr/ZG5zLmJpbmRfcHRyJC4yLmFycGEuaW4tYWRkci4xMC4wLjAuNC5zZXJ2ZXIxMS5uYXV0b2JvdC5sb2NhbC50ZXN0:4.0.0.10.in-addr.arpa/default.dev", + "ipv4addr": "10.0.0.4", + "ipv6addr": "", + "name": "4.0.0.10.in-addr.arpa", + "ptrdname": "server11.nautobot.local.test", + "comment": "ptr record comment", + "view": "default.dev", + } + infoblox_adapter.load_ipaddresses() + ip_address = infoblox_adapter.get( + "ipaddress", + {"address": "10.0.0.4", "prefix": "10.0.0.0/24", "prefix_length": 24, "namespace": "dev"}, + ) + self.assertEqual("10.0.0.4", ip_address.address) + self.assertEqual("10.0.0.0/24", ip_address.prefix) + self.assertEqual(24, ip_address.prefix_length) + self.assertEqual("dev", ip_address.namespace) + self.assertEqual("Active", ip_address.status) + self.assertEqual("fa-server1", ip_address.description) + self.assertEqual("dhcp", ip_address.ip_addr_type) + self.assertEqual({}, ip_address.ext_attrs) + self.assertEqual("", 
ip_address.mac_address) + self.assertEqual(True, ip_address.has_fixed_address) + self.assertEqual("fa server", ip_address.fixed_address_comment) + self.assertEqual("RESERVED", ip_address.fixed_address_type) + self.assertEqual( + "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjIuMi4u:10.0.0.4/dev", ip_address.fixed_address_ref + ) + self.assertEqual(True, ip_address.has_a_record) + self.assertEqual(True, ip_address.has_ptr_record) + self.assertEqual(False, ip_address.has_host_record) + + a_record = infoblox_adapter.get( + "dnsarecord", + {"address": "10.0.0.4", "prefix": "10.0.0.0/24", "prefix_length": 24, "namespace": "dev"}, + ) + self.assertEqual("10.0.0.4", a_record.address) + self.assertEqual("10.0.0.0/24", a_record.prefix) + self.assertEqual(24, a_record.prefix_length) + self.assertEqual("dev", a_record.namespace) + self.assertEqual("Active", a_record.status) + self.assertEqual("a record comment", a_record.description) + self.assertEqual("dhcp", a_record.ip_addr_type) + self.assertEqual({}, a_record.ext_attrs) + self.assertEqual("server11.nautobot.local.test", a_record.dns_name) + self.assertEqual( + "record:a/ZG5zLmJpbmRfYSQuMi50ZXN0LmxvY2FsLm5hdXRvYm90LHNlcnZlcjExLDEwLjAuMC40:server11.nautobot.local.test/default.dev", + a_record.ref, + ) + + ptr_record = infoblox_adapter.get( + "dnsptrrecord", + {"address": "10.0.0.4", "prefix": "10.0.0.0/24", "prefix_length": 24, "namespace": "dev"}, + ) + self.assertEqual("10.0.0.4", ptr_record.address) + self.assertEqual("10.0.0.0/24", ptr_record.prefix) + self.assertEqual(24, ptr_record.prefix_length) + self.assertEqual("dev", ptr_record.namespace) + self.assertEqual("Active", ptr_record.status) + self.assertEqual("ptr record comment", ptr_record.description) + self.assertEqual("dhcp", ptr_record.ip_addr_type) + self.assertEqual({}, ptr_record.ext_attrs) + self.assertEqual("server11.nautobot.local.test", ptr_record.dns_name) + self.assertEqual( + 
"record:ptr/ZG5zLmJpbmRfcHRyJC4yLmFycGEuaW4tYWRkci4xMC4wLjAuNC5zZXJ2ZXIxMS5uYXV0b2JvdC5sb2NhbC50ZXN0:4.0.0.10.in-addr.arpa/default.dev", + ptr_record.ref, + ) + + mock_default_extra_attrs.assert_called_once() + self.assertEqual(mock_extra_attr_dict.call_count, 3) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_default_ext_attrs", + autospec=True, + return_value={}, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox.get_ext_attr_dict", + autospec=True, + side_effect=[{}, {}], + ) + def test_load_ip_addresses_fixed_dns_host( + self, + mock_extra_attr_dict, + mock_default_extra_attrs, + ): + """Test loading IP Addresses with one fixed address and one Host record.""" + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + infoblox_adapter = InfobloxAdapter( + job=unittest.mock.Mock(), + sync=unittest.mock.Mock(), + conn=mock_client, + config=self.config, + ) + infoblox_adapter.conn.get_ipaddr_status.return_value = "Active" + infoblox_adapter.conn.get_all_ipv4address_networks.side_effect = [ + [ + { + "_ref": "ipv4address/Li5pcHY0X2FkZHJlc3MkMTAuMC4wLjMvMg:10.0.0.4/dev", + "ip_address": "10.0.0.4", + "is_conflict": "false", + "mac_address": "", + "names": ["server1.nautobot.local.test"], + "network": "10.0.0.0/24", + "network_view": "dev", + "objects": [ + "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjQuMi4u:10.0.0.4/dev", + "record:host/ZG5zLmhvc3QkLjIudGVzdC5sb2NhbC5uYXV0b2JvdC5zZXJ2ZXIx:server1.nautobot.local.test/default.dev", + ], + "status": "USED", + "types": ["HOST", "RESERVATION"], + "usage": [ + "DHCP", + "DNS", + ], + } + ] + ] + infoblox_adapter.conn.get_fixed_address_by_ref.return_value = { + "_ref": "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjQuMi4u:10.0.0.4/dev", + "ipv4addr": "10.0.0.4", + "extattrs": {}, + "name": 
"fa-server1", + "comment": "fa server", + "network": "10.0.0.0/24", + "network_view": "dev", + } + infoblox_adapter.conn.get_host_record_by_ref.return_value = { + "_ref": "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LnRlc3QudGVzdGRldmljZTE:testdevice1.test/default", + "ipv4addr": "10.0.0.4", + "ipv4addrs": [ + { + "_ref": "record:host/ZG5zLmhvc3QkLjIudGVzdC5sb2NhbC5uYXV0b2JvdC5zZXJ2ZXIx:server1.nautobot.local.test/default.dev", + "configure_for_dhcp": "true", + "host": "server1.nautobot.local.test", + "ipv4addr": "10.0.0.4", + "mac": "", + } + ], + "name": "server1.nautobot.local.test", + "view": "default", + "comment": "host record comment", + } + infoblox_adapter.load_ipaddresses() + ip_address = infoblox_adapter.get( + "ipaddress", + {"address": "10.0.0.4", "prefix": "10.0.0.0/24", "prefix_length": 24, "namespace": "dev"}, + ) + self.assertEqual("10.0.0.4", ip_address.address) + self.assertEqual("10.0.0.0/24", ip_address.prefix) + self.assertEqual(24, ip_address.prefix_length) + self.assertEqual("dev", ip_address.namespace) + self.assertEqual("Active", ip_address.status) + self.assertEqual("fa-server1", ip_address.description) + self.assertEqual("dhcp", ip_address.ip_addr_type) + self.assertEqual({}, ip_address.ext_attrs) + self.assertEqual("", ip_address.mac_address) + self.assertEqual(True, ip_address.has_fixed_address) + self.assertEqual("fa server", ip_address.fixed_address_comment) + self.assertEqual("RESERVED", ip_address.fixed_address_type) + self.assertEqual( + "fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLjQuMi4u:10.0.0.4/dev", ip_address.fixed_address_ref + ) + self.assertEqual(False, ip_address.has_a_record) + self.assertEqual(False, ip_address.has_ptr_record) + self.assertEqual(True, ip_address.has_host_record) + + host_record = infoblox_adapter.get( + "dnshostrecord", + {"address": "10.0.0.4", "prefix": "10.0.0.0/24", "prefix_length": 24, "namespace": "dev"}, + ) + self.assertEqual("10.0.0.4", host_record.address) + self.assertEqual("10.0.0.0/24", 
host_record.prefix) + self.assertEqual(24, host_record.prefix_length) + self.assertEqual("dev", host_record.namespace) + self.assertEqual("Active", host_record.status) + self.assertEqual("host record comment", host_record.description) + self.assertEqual("dhcp", host_record.ip_addr_type) + self.assertEqual({}, host_record.ext_attrs) + self.assertEqual("server1.nautobot.local.test", host_record.dns_name) + self.assertEqual( + "record:host/ZG5zLmhvc3QkLjIudGVzdC5sb2NhbC5uYXV0b2JvdC5zZXJ2ZXIx:server1.nautobot.local.test/default.dev", + host_record.ref, + ) + + mock_default_extra_attrs.assert_called_once() + self.assertEqual(mock_extra_attr_dict.call_count, 2) diff --git a/nautobot_ssot/tests/infoblox/test_infoblox_models.py b/nautobot_ssot/tests/infoblox/test_infoblox_models.py new file mode 100644 index 000000000..c31d7e9d5 --- /dev/null +++ b/nautobot_ssot/tests/infoblox/test_infoblox_models.py @@ -0,0 +1,1663 @@ +# pylint: disable=too-many-lines,too-many-public-methods +"""Unit tests for the Infoblox Diffsync models.""" +import unittest +from unittest.mock import Mock + +from django.test import TestCase + +from nautobot_ssot.integrations.infoblox.choices import ( + DNSRecordTypeChoices, + FixedAddressTypeChoices, + InfobloxDeletableModelChoices, +) +from nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox import InfobloxAdapter +from nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot import NautobotAdapter + +from .fixtures_infoblox import create_default_infoblox_config + + +def _get_ip_address_dict(attrs): + """Build dict used for creating diffsync IP address.""" + ipaddress_dict = { + "description": "Test IPAddress", + "address": "10.0.0.1", + "status": "Active", + "prefix": "10.0.0.0/8", + "prefix_length": 8, + "ip_addr_type": "dhcp", + "namespace": "Global", + } + ipaddress_dict.update(attrs) + + return ipaddress_dict + + +def _get_dns_a_record_dict(attrs): + """Build dict used for creating diffsync DNS A record.""" + dns_a_record_dict = { 
+ "description": "Test A Record", + "address": "10.0.0.1", + "status": "Active", + "prefix": "10.0.0.0/8", + "prefix_length": 8, + "dns_name": "server1.local.test.net", + "ip_addr_type": "host", + "namespace": "Global", + } + dns_a_record_dict.update(attrs) + + return dns_a_record_dict + + +def _get_dns_ptr_record_dict(attrs): + """Build dict used for creating diffsync DNS PTR record.""" + dns_ptr_record_dict = { + "description": "Test PTR Record", + "address": "10.0.0.1", + "status": "Active", + "prefix": "10.0.0.0/8", + "prefix_length": 8, + "dns_name": "server1.local.test.net", + "ip_addr_type": "host", + "namespace": "Global", + } + dns_ptr_record_dict.update(attrs) + + return dns_ptr_record_dict + + +def _get_dns_host_record_dict(attrs): + """Build dict used for creating diffsync DNS Host record.""" + dns_host_record_dict = { + "description": "Test Host Record", + "address": "10.0.0.1", + "status": "Active", + "prefix": "10.0.0.0/8", + "prefix_length": 8, + "dns_name": "server1.local.test.net", + "ip_addr_type": "host", + "namespace": "Global", + } + dns_host_record_dict.update(attrs) + + return dns_host_record_dict + + +def _get_network_dict(attrs): + """Build dict used for creating diffsync network.""" + network_dict = { + "network": "10.0.0.0/8", + "description": "TestNetwork", + "namespace": "Global", + "status": "Active", + } + network_dict.update(attrs) + + return network_dict + + +class TestModelInfobloxNetwork(TestCase): + """Tests correct network record is created.""" + + def setUp(self): + "Test class set up." 
+ self.config = create_default_infoblox_config() + self.nb_adapter = NautobotAdapter(config=self.config) + self.nb_adapter.job = Mock() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_network_create_network(self, mock_tag_involved_objects): + """Validate network gets created.""" + nb_network_atrs = {"network_type": "network"} + nb_ds_network = self.nb_adapter.prefix(**_get_network_dict(nb_network_atrs)) + self.nb_adapter.add(nb_ds_network) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_network.assert_called_once() + infoblox_adapter.conn.create_network.assert_called_with( + prefix="10.0.0.0/8", comment="TestNetwork", network_view="default" + ) + infoblox_adapter.conn.create_network_container.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_network_create_network_container(self, mock_tag_involved_objects): + """Validate network container gets created.""" + nb_network_atrs = {"network_type": "container"} + nb_ds_network = self.nb_adapter.prefix(**_get_network_dict(nb_network_atrs)) + self.nb_adapter.add(nb_ds_network) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = 
infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_network_container.assert_called_once() + infoblox_adapter.conn.create_network_container.assert_called_with( + prefix="10.0.0.0/8", comment="TestNetwork", network_view="default" + ) + infoblox_adapter.conn.create_network.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_network_update_network(self, mock_tag_involved_objects): + """Validate network gets updated.""" + nb_network_atrs = { + "description": "New Description", + } + nb_ds_network = self.nb_adapter.prefix(**_get_network_dict(nb_network_atrs)) + self.nb_adapter.add(nb_ds_network) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_network_atrs = { + "description": "Old Description", + } + inf_ds_network = infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + infoblox_adapter.add(inf_ds_network) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_network.assert_called_once() + infoblox_adapter.conn.update_network.assert_called_with( + prefix="10.0.0.0/8", comment="New Description", network_view="default" + ) + mock_tag_involved_objects.assert_called_once() + + +class TestModelInfobloxIPAddress(TestCase): + """Tests Fixed Address record operations.""" + + def setUp(self): + "Test class set up." 
+ self.config = create_default_infoblox_config() + self.nb_adapter = NautobotAdapter(config=self.config) + self.nb_adapter.job = Mock() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_create_nothing_gets_created(self, mock_tag_involved_objects): + """Validate nothing gets created if user selects DONT_CREATE_RECORD for DNS and Fixed Address options.""" + nb_ipaddress_atrs = {"has_fixed_address": True} + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_fixed_address.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_create_fixed_address_reserved(self, mock_tag_involved_objects): + """Validate Fixed Address type RESERVED is created.""" + nb_ipaddress_atrs = { + "description": "FixedAddresReserved", + "fixed_address_comment": "Fixed Address Reservation", + "has_fixed_address": True, + } + nb_ds_ipaddress 
= self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_fixed_address.assert_called_once() + infoblox_adapter.conn.create_fixed_address.assert_called_with( + ip_address="10.0.0.1", + name="FixedAddresReserved", + comment="Fixed Address Reservation", + match_client="RESERVED", + network_view="default", + ) + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_create_fixed_address_reserved_no_name(self, mock_tag_involved_objects): + """Validate Fixed Address type RESERVED is created with empty name.""" + nb_ipaddress_atrs = { + "description": "", + "has_fixed_address": True, + } + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.dns_record_type = 
DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_fixed_address.assert_called_once() + infoblox_adapter.conn.create_fixed_address.assert_called_with( + ip_address="10.0.0.1", + name="", + comment="", + match_client="RESERVED", + network_view="default", + ) + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_create_fixed_address_mac(self, mock_tag_involved_objects): + """Validate Fixed Address type MAC_ADDRESS is created.""" + nb_ipaddress_atrs = { + "description": "FixedAddresReserved", + "fixed_address_comment": "Fixed Address Reservation", + "has_fixed_address": True, + "mac_address": "52:1f:83:d4:9a:2e", + } + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + 
infoblox_adapter.conn.create_fixed_address.assert_called_once() + infoblox_adapter.conn.create_fixed_address.assert_called_with( + ip_address="10.0.0.1", + name="FixedAddresReserved", + comment="Fixed Address Reservation", + mac_address="52:1f:83:d4:9a:2e", + match_client="MAC_ADDRESS", + network_view="default", + ) + infoblox_adapter.conn.create_host_record.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_create_fixed_address_mac_no_name(self, mock_tag_involved_objects): + """Validate Fixed Address type MAC is created with empty name.""" + nb_ipaddress_atrs = { + "description": "", + "fixed_address_comment": "", + "has_fixed_address": True, + "mac_address": "52:1f:83:d4:9a:2e", + } + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_fixed_address.assert_called_once() + infoblox_adapter.conn.create_fixed_address.assert_called_with( + ip_address="10.0.0.1", + name="", + comment="", + mac_address="52:1f:83:d4:9a:2e", + match_client="MAC_ADDRESS", + network_view="default", + ) + 
infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_update_fixed_address_type_reserved_name_and_comment(self, mock_tag_involved_objects): + """Ensure Fixed Address type RESERVED has name and comment updated.""" + nb_ipaddress_atrs = { + "has_fixed_address": True, + "description": "server2.local.test.net", + "fixed_address_comment": "new description", + } + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_ipaddress_atrs = { + "has_fixed_address": True, + "fixed_address_ref": "fixedaddress/xyz", + "fixed_address_type": "RESERVED", + "fixed_address_name": "server1.local.test.net", + "fixed_address_comment": "old description", + } + inf_ds_ipaddress = infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_ipaddress_atrs)) + infoblox_adapter.add(inf_ds_ipaddress) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_fixed_address.assert_called_once() + infoblox_adapter.conn.update_fixed_address.assert_called_with( + ref="fixedaddress/xyz", data={"name": "server2.local.test.net", "comment": "new 
description"} + ) + infoblox_adapter.conn.update_host_record.assert_not_called() + infoblox_adapter.conn.update_a_record.assert_not_called() + infoblox_adapter.conn.update_ptr_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_update_fixed_address_type_reserved_name_and_comment_empty(self, mock_tag_involved_objects): + """Ensure Fixed Address type RESERVED has name and comment set to empty string.""" + nb_ipaddress_atrs = { + "has_fixed_address": True, + "description": "", + "fixed_address_comment": "", + } + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_ipaddress_atrs = { + "has_fixed_address": True, + "fixed_address_ref": "fixedaddress/xyz", + "fixed_address_type": "RESERVED", + "description": "server1.local.test.net", + "fixed_address_comment": "description", + } + inf_ds_ipaddress = infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_ipaddress_atrs)) + infoblox_adapter.add(inf_ds_ipaddress) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_fixed_address.assert_called_once() + infoblox_adapter.conn.update_fixed_address.assert_called_with( + ref="fixedaddress/xyz", data={"name": "", "comment": ""} + ) + 
infoblox_adapter.conn.update_host_record.assert_not_called() + infoblox_adapter.conn.update_a_record.assert_not_called() + infoblox_adapter.conn.update_ptr_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_update_fixed_address_type_mac_update_mac(self, mock_tag_involved_objects): + """Ensure Fixed Address type MAC has MAC address updated.""" + nb_ipaddress_atrs = { + "has_fixed_address": True, + "mac_address": "52:1f:83:d4:9a:ab", + } + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_ipaddress_atrs = { + "has_fixed_address": True, + "fixed_address_ref": "fixedaddress/xyz", + "fixed_address_type": "MAC_ADDRESS", + "mac_address": "52:1f:83:d4:9a:2e", + } + inf_ds_ipaddress = infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_ipaddress_atrs)) + infoblox_adapter.add(inf_ds_ipaddress) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_fixed_address.assert_called_once() + infoblox_adapter.conn.update_fixed_address.assert_called_with( + ref="fixedaddress/xyz", data={"mac": "52:1f:83:d4:9a:ab"} + ) + infoblox_adapter.conn.update_host_record.assert_not_called() + infoblox_adapter.conn.update_a_record.assert_not_called() + 
infoblox_adapter.conn.update_ptr_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_update_fixed_address_type_mac_name_and_comment(self, mock_tag_involved_objects): + """Ensure Fixed Address type MAC has name and comment updated.""" + nb_ipaddress_atrs = { + "description": "server2.local.test.net", + "has_fixed_address": True, + "fixed_address_comment": "new description", + } + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_ipaddress_atrs = { + "description": "server1.local.test.net", + "has_fixed_address": True, + "fixed_address_ref": "fixedaddress/xyz", + "fixed_address_type": "MAC_ADDRESS", + "fixed_address_comment": "old description", + } + inf_ds_ipaddress = infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_ipaddress_atrs)) + infoblox_adapter.add(inf_ds_ipaddress) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_fixed_address.assert_called_once() + infoblox_adapter.conn.update_fixed_address.assert_called_with( + ref="fixedaddress/xyz", data={"name": "server2.local.test.net", "comment": "new description"} + ) + infoblox_adapter.conn.update_host_record.assert_not_called() + infoblox_adapter.conn.update_a_record.assert_not_called() + 
infoblox_adapter.conn.update_ptr_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_update_fixed_address_type_mac_name_and_comment_empty(self, mock_tag_involved_objects): + """Ensure Fixed Address type MAC has name and comment set to empty string.""" + nb_ipaddress_atrs = { + "has_fixed_address": True, + "description": "", + "fixed_address_comment": "", + } + nb_ds_ipaddress = self.nb_adapter.ipaddress(**_get_ip_address_dict(nb_ipaddress_atrs)) + self.nb_adapter.add(nb_ds_ipaddress) + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_ipaddress_atrs = { + "has_fixed_address": True, + "fixed_address_ref": "fixedaddress/xyz", + "fixed_address_type": "MAC_ADDRESS", + "description": "server1.local.test.net", + "fixed_address_comment": "description", + } + inf_ds_ipaddress = infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_ipaddress_atrs)) + infoblox_adapter.add(inf_ds_ipaddress) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_fixed_address.assert_called_once() + infoblox_adapter.conn.update_fixed_address.assert_called_with( + ref="fixedaddress/xyz", data={"name": "", "comment": ""} + ) + infoblox_adapter.conn.update_host_record.assert_not_called() + infoblox_adapter.conn.update_a_record.assert_not_called() + infoblox_adapter.conn.update_ptr_record.assert_not_called() + 
mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_delete_fail(self, mock_tag_involved_objects): + """Ensure Fixed Address is not deleted if object deletion is not enabled in the config.""" + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + self.config.infoblox_deletable_models = [] + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_ipaddress_atrs = { + "has_fixed_address": True, + "fixed_address_ref": "fixedaddress/xyz", + "fixed_address_type": "RESERVED", + "description": "server1.local.test.net", + "fixed_address_comment": "description", + } + inf_ds_ipaddress = infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_ipaddress_atrs)) + infoblox_adapter.add(inf_ds_ipaddress) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.delete_fixed_address_record_by_ref.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ip_address_delete_success(self, mock_tag_involved_objects): + """Ensure Fixed Address is deleted if object deletion is enabled in the config.""" + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + self.config.infoblox_deletable_models = 
[InfobloxDeletableModelChoices.FIXED_ADDRESS] + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_ipaddress_atrs = { + "has_fixed_address": True, + "fixed_address_ref": "fixedaddress/xyz", + "fixed_address_type": "RESERVED", + "description": "server1.local.test.net", + "fixed_address_comment": "description", + } + inf_ds_ipaddress = infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_ipaddress_atrs)) + infoblox_adapter.add(inf_ds_ipaddress) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.delete_fixed_address_record_by_ref.assert_called_once() + infoblox_adapter.conn.delete_fixed_address_record_by_ref.assert_called_with(ref="fixedaddress/xyz") + mock_tag_involved_objects.assert_called_once() + + +class TestModelInfobloxDnsARecord(TestCase): + """Tests DNS A model operations.""" + + def setUp(self): + "Test class set up." 
+ self.config = create_default_infoblox_config() + self.nb_adapter = NautobotAdapter(config=self.config) + self.nb_adapter.job = Mock() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_a_record_create_nothing_gets_created(self, mock_tag_involved_objects, mock_validate_dns_name): + """Validate nothing gets created if user selects DONT_CREATE_RECORD for DNS and Fixed Address options.""" + nb_dnsarecord_atrs = {"has_fixed_address": "True"} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_dnsarecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_fixed_address.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + 
"nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_a_record_create(self, mock_tag_involved_objects, mock_validate_dns_name): + """Validate A Record is created.""" + nb_dnsarecord_atrs = {} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_dnsarecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_a_record.assert_called_once() + infoblox_adapter.conn.create_a_record.assert_called_with( + fqdn="server1.local.test.net", ip_address="10.0.0.1", comment="Test A Record", network_view="default" + ) + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name="server1.local.test.net", network_view="default" + ) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_a_record_create_no_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS A record is not created if 
DNS name is missing.""" + nb_arecord_atrs = {"dns_name": ""} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Cannot create Infoblox DNS A record for IP Address 10.0.0.1. DNS name is not defined." + job_logger.warning.assert_called_with(log_msg) + + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=False, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_a_record_create_invalid_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS A record is not created if DNS name is invalid.""" + nb_arecord_atrs = {"dns_name": ".invalid-dns-name"} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + self.nb_adapter.load() + with unittest.mock.patch( + 
"nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Invalid zone fqdn in DNS name `.invalid-dns-name` for IP Address 10.0.0.1." + job_logger.warning.assert_called_with(log_msg) + + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name=".invalid-dns-name", network_view="default" + ) + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_a_record_update(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure only A record is updated.""" + nb_arecord_atrs = {"dns_name": "server2.local.test.net"} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + 
self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:a/xyz", + } + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_a_record.assert_called_once() + infoblox_adapter.conn.update_a_record.assert_called_with( + ref="record:a/xyz", data={"name": "server2.local.test.net"} + ) + infoblox_adapter.conn.create_host_record.assert_not_called() + infoblox_adapter.conn.update_host_record.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.update_ptr_record.assert_not_called() + infoblox_adapter.conn.update_fixed_address.assert_not_called() + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name="server2.local.test.net", network_view="default" + ) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=False, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_a_record_update_invalid_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS A record is not updated if DNS name is invalid.""" + nb_arecord_atrs = {"dns_name": ".invalid-dns-name"} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + 
self.nb_adapter.add(nb_ds_arecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:a/xyz", + } + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Invalid zone fqdn in DNS name `.invalid-dns-name` for IP Address 10.0.0.1." + job_logger.warning.assert_called_with(log_msg) + + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name=".invalid-dns-name", network_view="default" + ) + infoblox_adapter.conn.update_a_record.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_a_record_delete_fail(self, mock_tag_involved_objects): + """Ensure DNS A record is not deleted if object deletion is not enabled in the config.""" + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + self.config.infoblox_deletable_models = [] + infoblox_adapter = 
InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:a/xyz", + } + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.delete_a_record_by_ref.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_a_record_delete_success(self, mock_tag_involved_objects): + """Ensure DNS A record is deleted if object deletion is enabled in the config.""" + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + self.config.infoblox_deletable_models = [InfobloxDeletableModelChoices.DNS_A_RECORD] + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:a/xyz", + } + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.delete_a_record_by_ref.assert_called_once() + infoblox_adapter.conn.delete_a_record_by_ref.assert_called_with(ref="record:a/xyz") + mock_tag_involved_objects.assert_called_once() + + +class 
TestModelInfobloxDnsHostRecord(TestCase): + """Tests DNS Host model operations.""" + + def setUp(self): + "Test class set up." + self.config = create_default_infoblox_config() + self.nb_adapter = NautobotAdapter(config=self.config) + self.nb_adapter.job = Mock() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_host_record_create_nothing_gets_created(self, mock_tag_involved_objects, mock_validate_dns_name): + """Validate nothing gets created if user selects DONT_CREATE_RECORD for DNS and Fixed Address options.""" + nb_dnshostrecord_atrs = {"has_fixed_address": "True"} + nb_ds_hostrecord = self.nb_adapter.dnshostrecord(**_get_dns_host_record_dict(nb_dnshostrecord_atrs)) + self.nb_adapter.add(nb_ds_hostrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.DONT_CREATE_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_fixed_address.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_not_called() + + @unittest.mock.patch( + 
"nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_host_record_create(self, mock_tag_involved_objects, mock_validate_dns_name): + """Validate Host Record is created.""" + nb_dnshostrecord_atrs = {"has_fixed_address": "True"} + nb_ds_hostrecord = self.nb_adapter.dnshostrecord(**_get_dns_host_record_dict(nb_dnshostrecord_atrs)) + self.nb_adapter.add(nb_ds_hostrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_called_once() + infoblox_adapter.conn.create_host_record.assert_called_with( + fqdn="server1.local.test.net", ip_address="10.0.0.1", comment="Test Host Record", network_view="default" + ) + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name="server1.local.test.net", network_view="default" + ) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + 
@unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_host_record_create_no_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS Host record is not created if DNS name is missing.""" + nb_dnshostrecord_atrs = {"dns_name": ""} + nb_ds_hostrecord = self.nb_adapter.dnshostrecord(**_get_dns_host_record_dict(nb_dnshostrecord_atrs)) + self.nb_adapter.add(nb_ds_hostrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Cannot create Infoblox DNS Host record for IP Address 10.0.0.1. DNS name is not defined." 
+ job_logger.warning.assert_called_with(log_msg) + + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=False, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_host_record_create_invalid_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS Host record is not created if DNS name is invalid.""" + nb_dnshostrecord_atrs = {"dns_name": ".invalid-dns-name"} + nb_ds_hostrecord = self.nb_adapter.dnshostrecord(**_get_dns_host_record_dict(nb_dnshostrecord_atrs)) + self.nb_adapter.add(nb_ds_hostrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Invalid zone fqdn in DNS name `.invalid-dns-name` for IP Address 10.0.0.1." 
+ job_logger.warning.assert_called_with(log_msg) + + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name=".invalid-dns-name", network_view="default" + ) + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_host_record_update(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure only Host record is updated.""" + nb_dnshostrecord_atrs = {"dns_name": "server2.local.test.net"} + nb_ds_hostrecord = self.nb_adapter.dnshostrecord(**_get_dns_host_record_dict(nb_dnshostrecord_atrs)) + self.nb_adapter.add(nb_ds_hostrecord) + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_hostrecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:host/xyz", + } + inf_ds_hostrecord = infoblox_adapter.dnshostrecord(**_get_dns_host_record_dict(inf_hostrecord_atrs)) + infoblox_adapter.add(inf_ds_hostrecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_host_record.assert_called_once() + 
infoblox_adapter.conn.update_host_record.assert_called_with( + ref="record:host/xyz", data={"name": "server2.local.test.net"} + ) + infoblox_adapter.conn.create_host_record.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.update_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.update_ptr_record.assert_not_called() + infoblox_adapter.conn.update_fixed_address.assert_not_called() + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name="server2.local.test.net", network_view="default" + ) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=False, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_host_record_update_invalid_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS Host record is not updated if DNS name is invalid.""" + nb_dnshostrecord_atrs = {"dns_name": ".invalid-dns-name"} + nb_ds_hostrecord = self.nb_adapter.dnshostrecord(**_get_dns_host_record_dict(nb_dnshostrecord_atrs)) + self.nb_adapter.add(nb_ds_hostrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_hostrecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": 
"record:host/xyz", + } + inf_ds_hostrecord = infoblox_adapter.dnshostrecord(**_get_dns_host_record_dict(inf_hostrecord_atrs)) + infoblox_adapter.add(inf_ds_hostrecord) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Invalid zone fqdn in DNS name `.invalid-dns-name` for IP Address 10.0.0.1." + job_logger.warning.assert_called_with(log_msg) + + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name=".invalid-dns-name", network_view="default" + ) + infoblox_adapter.conn.update_host_record.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_host_record_delete_fail(self, mock_tag_involved_objects): + """Ensure DNS Host record is not deleted if object deletion is not enabled in the config.""" + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + self.config.infoblox_deletable_models = [] + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_hostrecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:host/xyz", + } + inf_ds_hostrecord = infoblox_adapter.dnshostrecord(**_get_dns_host_record_dict(inf_hostrecord_atrs)) + infoblox_adapter.add(inf_ds_hostrecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.delete_host_record_by_ref.assert_not_called() + 
mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_host_record_delete_success(self, mock_tag_involved_objects): + """Ensure DNS Host record is deleted if object deletion is enabled in the config.""" + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + self.config.infoblox_deletable_models = [InfobloxDeletableModelChoices.DNS_HOST_RECORD] + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_hostrecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:host/xyz", + } + inf_ds_hostrecord = infoblox_adapter.dnshostrecord(**_get_dns_host_record_dict(inf_hostrecord_atrs)) + infoblox_adapter.add(inf_ds_hostrecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.delete_host_record_by_ref.assert_called_once() + infoblox_adapter.conn.delete_host_record_by_ref.assert_called_with(ref="record:host/xyz") + mock_tag_involved_objects.assert_called_once() + + +class TestModelInfobloxDnsPTRRecord(TestCase): + """Tests DNS PTR model operations.""" + + def setUp(self): + "Test class set up." 
+ self.config = create_default_infoblox_config() + self.nb_adapter = NautobotAdapter(config=self.config) + self.nb_adapter.job = Mock() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ptr_record_create(self, mock_tag_involved_objects, mock_validate_dns_name): + """Validate PTR record is created.""" + nb_arecord_atrs = {} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + nb_ptrrecord_atrs = {} + nb_ds_ptrrecord = self.nb_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(nb_ptrrecord_atrs)) + self.nb_adapter.add(nb_ds_ptrrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = {} + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.create_ptr_record.assert_called_once() + infoblox_adapter.conn.create_ptr_record.assert_called_with( + fqdn="server1.local.test.net", ip_address="10.0.0.1", comment="Test PTR Record", network_view="default" + ) + infoblox_adapter.conn.create_a_record.assert_not_called() + 
infoblox_adapter.conn.create_host_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name="server1.local.test.net", network_view="default" + ) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ptr_record_create_no_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS PTR record is not created if DNS name is missing.""" + nb_arecord_atrs = {} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + nb_ptrrecord_atrs = {"dns_name": ""} + nb_ds_ptrrecord = self.nb_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(nb_ptrrecord_atrs)) + self.nb_adapter.add(nb_ds_ptrrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = {} + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Cannot create Infoblox PTR DNS record for IP Address 10.0.0.1. DNS name is not defined." 
+ job_logger.warning.assert_called_with(log_msg) + + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=False, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ptr_record_create_invalid_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS PTR record is not created if DNS name is invalid.""" + nb_arecord_atrs = {} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + nb_ptrrecord_atrs = {"dns_name": ".invalid-dns-name"} + nb_ds_ptrrecord = self.nb_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(nb_ptrrecord_atrs)) + self.nb_adapter.add(nb_ds_ptrrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = {} + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Invalid zone fqdn in DNS name 
`.invalid-dns-name` for IP Address 10.0.0.1." + job_logger.warning.assert_called_with(log_msg) + + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name=".invalid-dns-name", network_view="default" + ) + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=True, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ptr_record_update(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure PTR records is updated.""" + nb_arecord_atrs = {} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + nb_ptrrecord_atrs = {"dns_name": "server2.local.test.net"} + nb_ds_ptrrecord = self.nb_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(nb_ptrrecord_atrs)) + self.nb_adapter.add(nb_ds_ptrrecord) + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = {} + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + inf_ptrrecord_atrs = { + 
"dns_name": "server1.local.test.net", + "ref": "record:ptr/xyz", + } + inf_ds_ptrrecord = infoblox_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(inf_ptrrecord_atrs)) + infoblox_adapter.add(inf_ds_ptrrecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.update_ptr_record.assert_called_once() + infoblox_adapter.conn.update_ptr_record.assert_called_with( + ref="record:ptr/xyz", data={"ptrdname": "server2.local.test.net"} + ) + infoblox_adapter.conn.update_a_record.assert_not_called() + infoblox_adapter.conn.create_host_record.assert_not_called() + infoblox_adapter.conn.update_host_record.assert_not_called() + infoblox_adapter.conn.create_a_record.assert_not_called() + infoblox_adapter.conn.create_ptr_record.assert_not_called() + infoblox_adapter.conn.update_fixed_address.assert_not_called() + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name="server2.local.test.net", network_view="default" + ) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.models.infoblox.validate_dns_name", + autospec=True, + return_value=False, + ) + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ptr_record_update_invalid_dns_name(self, mock_tag_involved_objects, mock_validate_dns_name): + """Ensure DNS PTR record is not updated if DNS name is invalid.""" + nb_arecord_atrs = {} + nb_ds_arecord = self.nb_adapter.dnsarecord(**_get_dns_a_record_dict(nb_arecord_atrs)) + self.nb_adapter.add(nb_ds_arecord) + nb_ptrrecord_atrs = {"dns_name": ".invalid-dns-name"} + nb_ds_ptrrecord = self.nb_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(nb_ptrrecord_atrs)) + self.nb_adapter.add(nb_ds_ptrrecord) + self.nb_adapter.load() + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", 
autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_arecord_atrs = {} + inf_ds_arecord = infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + infoblox_adapter.add(inf_ds_arecord) + infoblox_adapter.job = Mock() + job_logger = Mock() + infoblox_adapter.job.logger = job_logger + self.nb_adapter.sync_to(infoblox_adapter) + log_msg = "Invalid zone fqdn in DNS name `.invalid-dns-name` for IP Address 10.0.0.1." + job_logger.warning.assert_called_with(log_msg) + + infoblox_adapter.conn.update_ptr_record.assert_not_called() + mock_tag_involved_objects.assert_called_once() + mock_validate_dns_name.assert_called_once() + mock_validate_dns_name.assert_called_with( + infoblox_client=mock_client, dns_name=".invalid-dns-name", network_view="default" + ) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ptr_record_delete_fail(self, mock_tag_involved_objects): + """Ensure DNS PTR record is not deleted if object deletion is not enabled in the config.""" + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + self.config.infoblox_deletable_models = [] + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + 
infoblox_adapter.add(inf_ds_namespace) + inf_ptrrecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:ptr/xyz", + } + inf_ds_ptrrecord = infoblox_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(inf_ptrrecord_atrs)) + infoblox_adapter.add(inf_ds_ptrrecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.delete_ptr_record_by_ref.assert_not_called() + mock_tag_involved_objects.assert_called_once() + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot.NautobotMixin.tag_involved_objects", + autospec=True, + ) + def test_ptr_record_delete_success(self, mock_tag_involved_objects): + """Ensure DNS PTR record is deleted if object deletion is enabled in the config.""" + self.nb_adapter.load() + + with unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.client.InfobloxApi", autospec=True + ) as mock_client: + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + self.config.infoblox_deletable_models = [InfobloxDeletableModelChoices.DNS_PTR_RECORD] + infoblox_adapter = InfobloxAdapter(conn=mock_client, config=self.config) + infoblox_adapter.job = Mock() + inf_ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(inf_ds_namespace) + inf_ptrrecord_atrs = { + "dns_name": "server1.local.test.net", + "ref": "record:ptr/xyz", + } + inf_ds_ptrrecord = infoblox_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(inf_ptrrecord_atrs)) + infoblox_adapter.add(inf_ds_ptrrecord) + self.nb_adapter.sync_to(infoblox_adapter) + infoblox_adapter.conn.delete_ptr_record_by_ref.assert_called_once() + infoblox_adapter.conn.delete_ptr_record_by_ref.assert_called_with(ref="record:ptr/xyz") + mock_tag_involved_objects.assert_called_once() diff --git a/nautobot_ssot/tests/infoblox/test_models.py b/nautobot_ssot/tests/infoblox/test_models.py new file mode 100644 index 
000000000..2303b7da3 --- /dev/null +++ b/nautobot_ssot/tests/infoblox/test_models.py @@ -0,0 +1,423 @@ +# pylint: disable=R0801 +"""Infoblox Integration model tests.""" +import os +from copy import deepcopy +from unittest import mock + +from django.core.exceptions import ValidationError +from django.test import TestCase +from nautobot.extras.choices import SecretsGroupAccessTypeChoices, SecretsGroupSecretTypeChoices +from nautobot.extras.models import ExternalIntegration, Secret, SecretsGroup, SecretsGroupAssociation, Status + +from nautobot_ssot.integrations.infoblox.choices import DNSRecordTypeChoices, FixedAddressTypeChoices +from nautobot_ssot.integrations.infoblox.models import SSOTInfobloxConfig + + +@mock.patch.dict(os.environ, {"INFOBLOX_USERNAME": "username", "INFOBLOX_PASSWORD": "password"}) +class SSOTInfobloxConfigTestCase(TestCase): # pylint: disable=too-many-public-methods + """Tests for the HardwareLCM models.""" + + def setUp(self): + """Setup testing.""" + self.default_status, _ = Status.objects.get_or_create(name="Active") + sync_filters = [{"network_view": "default"}] + + infoblox_request_timeout = 60 + secrets_group, _ = SecretsGroup.objects.get_or_create(name="InfobloxSSOTUnitTest") + inf_username, _ = Secret.objects.get_or_create( + name="Infoblox Username - InfobloxSSOTUnitTest", + defaults={ + "provider": "environment-variable", + "parameters": {"variable": "INFOBLOX_USERNAME"}, + }, + ) + inf_password, _ = Secret.objects.get_or_create( + name="Infoblox Password - InfobloxSSOTUnitTest", + defaults={ + "provider": "environment-variable", + "parameters": {"variable": "INFOBLOX_PASSWORD"}, + }, + ) + self.sg_username, _ = SecretsGroupAssociation.objects.get_or_create( + secrets_group=secrets_group, + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME, + defaults={ + "secret": inf_username, + }, + ) + self.sg_password, _ = SecretsGroupAssociation.objects.get_or_create( + 
secrets_group=secrets_group, + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD, + defaults={ + "secret": inf_password, + }, + ) + self.external_integration = ExternalIntegration.objects.create( + name="InfobloxModelUnitTestInstance", + remote_url="https://infoblox..me.local", + secrets_group=secrets_group, + verify_ssl=True, + timeout=infoblox_request_timeout, + ) + + self.infoblox_config_dict = { + "name": "InfobloxModelUnitTestConfig", + "description": "Unit Test Config", + "default_status": self.default_status, + "infoblox_wapi_version": "v2.12", + "infoblox_instance": self.external_integration, + "enable_sync_to_infoblox": True, + "import_ip_addresses": True, + "import_subnets": True, + "import_vlan_views": True, + "import_vlans": True, + "import_ipv4": True, + "import_ipv6": False, + "job_enabled": True, + "infoblox_sync_filters": sync_filters, + "infoblox_dns_view_mapping": {"default": "default.default"}, + "cf_fields_ignore": {"extensible_attributes": [], "custom_fields": []}, + "fixed_address_type": FixedAddressTypeChoices.DONT_CREATE_RECORD, + "dns_record_type": DNSRecordTypeChoices.HOST_RECORD, + } + + def test_create_infoblox_config_required_fields_only(self): + """Successfully create Infoblox config with required fields only.""" + inf_cfg = SSOTInfobloxConfig( + name="InfobloxModelUnitTestConfigReqOnly", + default_status=self.default_status, + infoblox_instance=self.external_integration, + ) + inf_cfg.validated_save() + + inf_cfg_db = SSOTInfobloxConfig.objects.get(name="InfobloxModelUnitTestConfigReqOnly") + + self.assertEqual(inf_cfg_db.name, "InfobloxModelUnitTestConfigReqOnly") + self.assertEqual(inf_cfg_db.description, "") + self.assertEqual(inf_cfg_db.default_status, self.default_status) + self.assertEqual(inf_cfg_db.infoblox_instance, self.external_integration) + self.assertEqual(inf_cfg_db.infoblox_wapi_version, "v2.12") + self.assertEqual(inf_cfg_db.enable_sync_to_infoblox, False) + 
self.assertEqual(inf_cfg_db.import_ip_addresses, False) + self.assertEqual(inf_cfg_db.import_subnets, False) + self.assertEqual(inf_cfg_db.import_vlan_views, False) + self.assertEqual(inf_cfg_db.import_vlans, False) + self.assertEqual(inf_cfg_db.infoblox_sync_filters, [{"network_view": "default"}]) + self.assertEqual(inf_cfg_db.infoblox_dns_view_mapping, {}) + self.assertEqual(inf_cfg_db.cf_fields_ignore, {"custom_fields": [], "extensible_attributes": []}) + self.assertEqual(inf_cfg_db.import_ipv4, True) + self.assertEqual(inf_cfg_db.import_ipv6, False) + self.assertEqual(inf_cfg_db.fixed_address_type, FixedAddressTypeChoices.DONT_CREATE_RECORD) + self.assertEqual(inf_cfg_db.dns_record_type, DNSRecordTypeChoices.HOST_RECORD) + self.assertEqual(inf_cfg_db.job_enabled, False) + + def test_create_infoblox_config_all_fields(self): + """Successfully create Infoblox config with all field.""" + inf_cfg = SSOTInfobloxConfig( + name="InfobloxModelUnitTestConfigAllFields", + default_status=self.default_status, + infoblox_instance=self.external_integration, + infoblox_wapi_version="v2.12", + enable_sync_to_infoblox=True, + import_ip_addresses=True, + import_subnets=True, + import_vlan_views=True, + import_vlans=True, + import_ipv4=False, + import_ipv6=True, + job_enabled=True, + infoblox_sync_filters=[{"network_view": "dev"}], + infoblox_dns_view_mapping={"default": "default.default"}, + cf_fields_ignore={"extensible_attributes": ["aws_id"], "custom_fields": ["po_no"]}, + fixed_address_type=FixedAddressTypeChoices.MAC_ADDRESS, + dns_record_type=DNSRecordTypeChoices.A_RECORD, + ) + inf_cfg.validated_save() + + inf_cfg_db = SSOTInfobloxConfig.objects.get(name="InfobloxModelUnitTestConfigAllFields") + + self.assertEqual(inf_cfg_db.name, "InfobloxModelUnitTestConfigAllFields") + self.assertEqual(inf_cfg_db.description, "") + self.assertEqual(inf_cfg_db.default_status, self.default_status) + self.assertEqual(inf_cfg_db.infoblox_instance, self.external_integration) + 
self.assertEqual(inf_cfg_db.infoblox_wapi_version, "v2.12") + self.assertEqual(inf_cfg_db.enable_sync_to_infoblox, True) + self.assertEqual(inf_cfg_db.import_ip_addresses, True) + self.assertEqual(inf_cfg_db.import_subnets, True) + self.assertEqual(inf_cfg_db.import_vlan_views, True) + self.assertEqual(inf_cfg_db.import_vlans, True) + self.assertEqual(inf_cfg_db.infoblox_sync_filters, [{"network_view": "dev"}]) + self.assertEqual(inf_cfg_db.infoblox_dns_view_mapping, {"default": "default.default"}) + self.assertEqual(inf_cfg_db.cf_fields_ignore, {"extensible_attributes": ["aws_id"], "custom_fields": ["po_no"]}) + self.assertEqual(inf_cfg_db.import_ipv4, False) + self.assertEqual(inf_cfg_db.import_ipv6, True) + self.assertEqual(inf_cfg_db.fixed_address_type, FixedAddressTypeChoices.MAC_ADDRESS) + self.assertEqual(inf_cfg_db.dns_record_type, DNSRecordTypeChoices.A_RECORD) + self.assertEqual(inf_cfg_db.job_enabled, True) + + def test_infoblox_sync_filters_must_be_a_list(self): + """infoblox_sync_filters must be a list.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = {"k": "v"} + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual(failure_exception.exception.messages[0], "Sync filters must be a list.") + + def test_infoblox_sync_filters_filter_must_be_dict(self): + """Individual filter in infoblox_sync_filters must be a dict.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [""] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual(failure_exception.exception.messages[0], "Sync filter must be a dict.") + + def 
test_infoblox_sync_filters_invalid_key_found(self): + """Only keys allowed in a filter are `network_view`, `prefixes_ipv4` and `prefixes_ipv6`.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"prefixes": [], "name": "myname", "network_view": "dev"}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertIn("Invalid keys found in the sync filter", failure_exception.exception.messages[0]) + + def test_infoblox_sync_filters_no_network_view_key(self): + """Prefix filter must have a `network_view` key defined.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"prefixes_ipv4": ["10.0.0.0/24"]}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual(failure_exception.exception.messages[0], "Sync filter must have `network_view` key defined.") + + def test_infoblox_sync_filters_network_view_invalid_type(self): + """Key `network_view` must be a string.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": []}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual(failure_exception.exception.messages[0], "Value of the `network_view` key must be a string.") + + def test_infoblox_sync_filters_duplicate_network_view(self): + """Duplicate values for `network_view` are not allowed.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = 
[{"network_view": "dev"}, {"network_view": "dev"}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual(failure_exception.exception.messages[0], "Duplicate value for the `network_view` found: dev.") + + def test_infoblox_sync_filters_prefixes_ipv4_must_be_list(self): + """Value of `prefixes_ipv4` key must be a list.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": "dev", "prefixes_ipv4": ""}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual(failure_exception.exception.messages[0], "Value of the `prefixes_ipv4` key must be a list.") + + def test_infoblox_sync_filters_prefixes_ipv4_must_not_be_an_empty_list(self): + """Value of `prefixes_ipv4` key must not be an empty list.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": "dev", "prefixes_ipv4": []}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], "Value of the `prefixes_ipv4` key must not be an empty list." 
+ ) + + def test_infoblox_sync_filters_prefixes_ipv4_must_have_prefix_length(self): + """Prefix in `prefixes_ipv4` must have prefix length defined.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": "dev", "prefixes_ipv4": ["10.0.0.0"]}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], + "IPv4 prefix must have a prefix length defined using `/` format: 10.0.0.0.", + ) + + def test_infoblox_sync_filters_prefixes_ipv4_must_be_valid_prefix(self): + """Prefix in `prefixes_ipv4` must be valid.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": "dev", "prefixes_ipv4": ["10.0.0/24"]}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertIn("IPv4 prefix parsing error", failure_exception.exception.messages[0]) + + def test_infoblox_sync_filters_prefixes_ipv6_must_be_list(self): + """Value of `prefixes_ipv6` key must be a list.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": "dev", "prefixes_ipv6": ""}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual(failure_exception.exception.messages[0], "Value of the `prefixes_ipv6` key must be a list.") + + def test_infoblox_sync_filters_prefixes_ipv6_must_not_be_an_empty_list(self): + """Value of `prefixes_ipv6` key must not be an empty list.""" + 
inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": "dev", "prefixes_ipv6": []}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], "Value of the `prefixes_ipv6` key must not be an empty list." + ) + + def test_infoblox_sync_filters_prefixes_ipv6_must_have_prefix_length(self): + """Prefix in `prefixes_ipv6` must have prefix length defined.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": "dev", "prefixes_ipv6": ["2001:5b0:4100::"]}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], + "IPv6 prefix must have a prefix length defined using `/` format: 2001:5b0:4100::.", + ) + + def test_infoblox_sync_filters_prefixes_ipv6_must_be_valid_prefix(self): + """Prefix in `prefixes_ipv6` must be valid.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_sync_filters"] = [{"network_view": "dev", "prefixes_ipv6": ["2001::5b0:4100::/40"]}] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_sync_filters", failure_exception.exception.error_dict) + self.assertIn("IPv6 prefix parsing error", failure_exception.exception.messages[0]) + + def test_infoblox_instance_must_have_secrets_group(self): + """External integration for Infoblox instance must have secrets group assigned.""" + inf_dict = deepcopy(self.infoblox_config_dict) + 
inf_dict["infoblox_instance"].secrets_group = None + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_instance", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], "Infoblox instance must have Secrets groups assigned." + ) + + def test_infoblox_instance_must_have_secrets_rest_username(self): + """Secrets associated with secret group used by Infoblox Instance must be of correct type.""" + inf_dict = deepcopy(self.infoblox_config_dict) + infoblox_config = SSOTInfobloxConfig(**inf_dict) + self.sg_username.secret_type = SecretsGroupSecretTypeChoices.TYPE_TOKEN + self.sg_username.save() + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_instance", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], + "Secrets group for the Infoblox instance must have secret with type Username and access type REST.", + ) + self.sg_username.secret_type = SecretsGroupSecretTypeChoices.TYPE_USERNAME + self.sg_username.save() + self.sg_password.access_type = SecretsGroupAccessTypeChoices.TYPE_CONSOLE + self.sg_password.save() + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_instance", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], + "Secrets group for the Infoblox instance must have secret with type Password and access type REST.", + ) + self.sg_password.access_type = SecretsGroupAccessTypeChoices.TYPE_REST + self.sg_password.save() + + def test_infoblox_import_ip_at_least_one_chosen(self): + """At least one of `import_ipv4` or `import_ipv6` must be selected.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["import_ipv4"] = False + inf_dict["import_ipv6"] = False 
+ infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("import_ipv4", failure_exception.exception.error_dict) + self.assertIn("import_ipv6", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.error_dict["import_ipv4"][0].message, + "At least one of `import_ipv4` or `import_ipv6` must be set to True.", + ) + self.assertEqual( + failure_exception.exception.error_dict["import_ipv6"][0].message, + "At least one of `import_ipv4` or `import_ipv6` must be set to True.", + ) + + def test_infoblox_infoblox_dns_view_mapping_must_be_dict(self): + """Value of `infoblox_dns_view_mapping` key must be a dict.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["infoblox_dns_view_mapping"] = [] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("infoblox_dns_view_mapping", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], + "`infoblox_dns_view_mapping` must be a dictionary mapping network view names to dns view names.", + ) + + def test_infoblox_infoblox_cf_fields_ignore_must_be_dict(self): + """Value of `cf_fields_ignore` key must be a dict.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["cf_fields_ignore"] = [] + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("cf_fields_ignore", failure_exception.exception.error_dict) + self.assertEqual(failure_exception.exception.messages[0], "`cf_fields_ignore` must be a dictionary.") + + def test_infoblox_infoblox_cf_fields_key_names_must_be_valid(self): + """Only `extensible_attributes` and `custom_fields` keys are allowed in `cf_fields_ignore`.""" + inf_dict = deepcopy(self.infoblox_config_dict) + 
inf_dict["cf_fields_ignore"] = {"fields": []} + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("cf_fields_ignore", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], + "Invalid key name `fields`. Only `extensible_attributes` and `custom_fields` are allowed.", + ) + + def test_infoblox_infoblox_cf_fields_values_must_be_list_of_string(self): + """`infoblox_cf_fields` key values must be list of strings.""" + inf_dict = deepcopy(self.infoblox_config_dict) + inf_dict["cf_fields_ignore"] = {"extensible_attributes": ["ea1", 2]} + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("cf_fields_ignore", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], "Value of key `extensible_attributes` must be a list of strings." + ) + + inf_dict["cf_fields_ignore"] = {"custom_fields": ["cf1", 2]} + infoblox_config = SSOTInfobloxConfig(**inf_dict) + with self.assertRaises(ValidationError) as failure_exception: + infoblox_config.full_clean() + self.assertIn("cf_fields_ignore", failure_exception.exception.error_dict) + self.assertEqual( + failure_exception.exception.messages[0], "Value of key `custom_fields` must be a list of strings." 
+ ) diff --git a/nautobot_ssot/tests/infoblox/test_nautobot_adapter.py b/nautobot_ssot/tests/infoblox/test_nautobot_adapter.py index 2a8349d09..260c160a7 100644 --- a/nautobot_ssot/tests/infoblox/test_nautobot_adapter.py +++ b/nautobot_ssot/tests/infoblox/test_nautobot_adapter.py @@ -1,12 +1,15 @@ """Nautobot Adapter tests.""" +from unittest import mock + from django.contrib.contenttypes.models import ContentType from django.test import TestCase +from nautobot.extras.models import RelationshipAssociation, Status +from nautobot.ipam.models import VLAN, IPAddress, Namespace, Prefix, VLANGroup -from nautobot.extras.models import Relationship, RelationshipAssociation, Status -from nautobot.ipam.models import Prefix, VLAN, VLANGroup - +from nautobot_ssot.integrations.infoblox.choices import DNSRecordTypeChoices from nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot import NautobotAdapter +from nautobot_ssot.tests.infoblox.fixtures_infoblox import create_default_infoblox_config, create_prefix_relationship class TestNautobotAdapter(TestCase): @@ -14,7 +17,7 @@ class TestNautobotAdapter(TestCase): def setUp(self): active_status = Status.objects.get(name="Active") - prefix_vlan_relationship = Relationship.objects.get(label="Prefix -> VLAN") + prefix_vlan_relationship = create_prefix_relationship() vlan_group1 = VLANGroup.objects.create(name="one") vlan_group2 = VLANGroup.objects.create(name="two") vlan10 = VLAN.objects.create( @@ -46,6 +49,8 @@ def setUp(self): status=active_status, vlan_group=vlan_group2, ) + namespace_dev, _ = Namespace.objects.get_or_create(name="dev") + namespace_test, _ = Namespace.objects.get_or_create(name="test") prefix1 = Prefix.objects.create( prefix="10.0.0.0/24", status=active_status, @@ -60,12 +65,100 @@ def setUp(self): ) prefix1.cf["dhcp_ranges"] = "10.0.0.50-10.0.0.254" prefix1.save() - Prefix.objects.create( + prefix2 = Prefix.objects.create( + prefix="10.0.1.0/24", + status=active_status, + type="Network", + ) + prefix3 = 
Prefix.objects.create( prefix="10.0.1.0/24", status=active_status, type="Network", + namespace=namespace_dev, + ) + prefix4 = Prefix.objects.create( + prefix="10.2.1.0/24", + status=active_status, + type="Network", + namespace=namespace_dev, + ) + prefix5 = Prefix.objects.create( + prefix="10.2.1.0/25", + status=active_status, + type="Network", + namespace=namespace_test, + ) + prefix6 = Prefix.objects.create( + prefix="10.5.1.0/25", + status=active_status, + type="Network", + namespace=namespace_test, + ) + ipv6prefix1 = Prefix.objects.create( + prefix="2001:5b0:4100::/48", + status=active_status, + type="Network", + ) + IPAddress.objects.create( + description="Test IPAddress 1", + address="10.0.1.1/24", + status=active_status, + type="host", + dns_name="server1.nautobot.test.com", + parent_id=prefix2.id, + ) + IPAddress.objects.create( + description="Test IPAddress 2", + address="10.0.1.2/24", + status=active_status, + type="host", + dns_name="server2.nautobot.test.com", + parent_id=prefix2.id, + ) + IPAddress.objects.create( + description="Test IPAddress 3", + address="10.0.1.1/24", + status=active_status, + type="host", + dns_name="server10.nautobot.test.com", + parent_id=prefix3.id, + ) + IPAddress.objects.create( + description="Test IPAddress 4", + address="10.2.1.1/24", + status=active_status, + type="host", + dns_name="server11.nautobot.test.com", + parent_id=prefix4.id, + ) + IPAddress.objects.create( + description="Test IPAddress 5", + address="10.2.1.10/25", + status=active_status, + type="host", + dns_name="server20.nautobot.test.com", + parent_id=prefix5.id, + ) + IPAddress.objects.create( + description="Test IPAddress 6", + address="10.5.1.5/25", + status=active_status, + type="host", + dns_name="server21.nautobot.test.com", + parent_id=prefix6.id, ) - self.nb_adapter = NautobotAdapter() + IPAddress.objects.create( + description="Test IPv6Address 1", + address="2001:5b0:4100::1/48", + status=active_status, + type="host", + 
dns_name="v6server1.nautobot.test.com", + parent_id=ipv6prefix1.id, + ) + self.config = create_default_infoblox_config() + self.sync_filters = self.config.infoblox_sync_filters + self.nb_adapter = NautobotAdapter(config=self.config) + self.nb_adapter.job = mock.Mock() def test_load_vlans_loads_expected_vlans(self): self.nb_adapter.load_vlans() @@ -79,16 +172,195 @@ def test_load_vlans_does_not_load_ungrouped_vlans(self): self.assertFalse(10 in actual_vlan_ids) def test_load_prefixes_loads_prefixes(self): - self.nb_adapter.load_prefixes() - actual_prefixes = {prefix.network for prefix in self.nb_adapter.get_all("prefix")} - self.assertEqual(actual_prefixes, {"10.0.0.0/24", "10.0.1.0/24"}) + self.nb_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=self.sync_filters) + actual_prefixes = {(prefix.network, prefix.namespace) for prefix in self.nb_adapter.get_all("prefix")} + self.assertEqual(actual_prefixes, {("10.0.0.0/24", "Global"), ("10.0.1.0/24", "Global")}) + + def test_load_prefixes_loads_prefixes_dev_namespace(self): + sync_filters = [{"network_view": "dev"}] + self.nb_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_prefixes = {(prefix.network, prefix.namespace) for prefix in self.nb_adapter.get_all("prefix")} + self.assertEqual( + actual_prefixes, + {("10.0.1.0/24", "dev"), ("10.2.1.0/24", "dev")}, + ) + + def test_load_prefixes_loads_prefixes_dev_namespace_ipv4_filter(self): + sync_filters = [{"network_view": "dev", "prefixes_ipv4": ["10.0.0.0/16"]}] + self.nb_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_prefixes = {(prefix.network, prefix.namespace) for prefix in self.nb_adapter.get_all("prefix")} + self.assertEqual( + actual_prefixes, + { + ("10.0.1.0/24", "dev"), + }, + ) + + def test_load_prefixes_loads_prefixes_multiple_filters(self): + sync_filters = [ + {"network_view": "dev", "prefixes_ipv4": ["10.0.0.0/16"]}, + {"network_view": 
"test", "prefixes_ipv4": ["10.0.0.0/8"]}, + ] + self.nb_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_prefixes = {(prefix.network, prefix.namespace) for prefix in self.nb_adapter.get_all("prefix")} + self.assertEqual( + actual_prefixes, + { + ("10.0.1.0/24", "dev"), + ("10.2.1.0/25", "test"), + ("10.5.1.0/25", "test"), + }, + ) + + def test_load_prefixes_loads_prefixes_ipv6(self): + sync_filters = [{"network_view": "default"}] + self.nb_adapter.load_prefixes(include_ipv4=False, include_ipv6=True, sync_filters=sync_filters) + actual_prefixes = {(prefix.network, prefix.namespace) for prefix in self.nb_adapter.get_all("prefix")} + self.assertEqual( + actual_prefixes, + { + ("2001:5b0:4100::/48", "Global"), + }, + ) + + def test_load_prefixes_loads_prefixes_ipv4_and_ipv6(self): + sync_filters = [{"network_view": "default"}] + self.nb_adapter.load_prefixes(include_ipv4=True, include_ipv6=True, sync_filters=sync_filters) + actual_prefixes = {(prefix.network, prefix.namespace) for prefix in self.nb_adapter.get_all("prefix")} + self.assertEqual( + actual_prefixes, + { + ("10.0.0.0/24", "Global"), + ("10.0.1.0/24", "Global"), + ("2001:5b0:4100::/48", "Global"), + }, + ) def test_load_prefixes_loads_prefixes_and_vlan_relationship(self): - self.nb_adapter.load_prefixes() - prefix_with_vlan = self.nb_adapter.get("prefix", {"network": "10.0.0.0/24"}) + self.nb_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=self.sync_filters) + prefix_with_vlan = self.nb_adapter.get("prefix", {"network": "10.0.0.0/24", "namespace": "Global"}) self.assertEqual({10: {"vid": 10, "name": "ten", "group": None}}, prefix_with_vlan.vlans) def test_load_prefixes_loads_ranges(self): - self.nb_adapter.load_prefixes() - prefix_with_ranges = self.nb_adapter.get("prefix", {"network": "10.0.0.0/24"}) + self.nb_adapter.load_prefixes(include_ipv4=True, include_ipv6=False, sync_filters=self.sync_filters) + prefix_with_ranges = 
self.nb_adapter.get("prefix", {"network": "10.0.0.0/24", "namespace": "Global"}) self.assertEqual(["10.0.0.50-10.0.0.254"], prefix_with_ranges.ranges) + + def test_load_ipaddresses_loads_ips_default_namespace(self): + sync_filters = [{"network_view": "default"}] + self.nb_adapter.load_ipaddresses(sync_filters=sync_filters, include_ipv4=True, include_ipv6=False) + actual_ipaddresses = {(ipaddr.address, ipaddr.namespace) for ipaddr in self.nb_adapter.get_all("ipaddress")} + self.assertEqual( + actual_ipaddresses, + {("10.0.1.1", "Global"), ("10.0.1.2", "Global")}, + ) + + def test_load_ipaddresses_loads_ips_dev_namespace(self): + sync_filters = [{"network_view": "dev"}] + self.nb_adapter.load_ipaddresses(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_ipaddresses = {(ipaddr.address, ipaddr.namespace) for ipaddr in self.nb_adapter.get_all("ipaddress")} + self.assertEqual( + actual_ipaddresses, + {("10.0.1.1", "dev"), ("10.2.1.1", "dev")}, + ) + + def test_load_ipaddresses_loads_ips_dev_namespace_filtered(self): + sync_filters = [{"network_view": "dev", "prefixes_ipv4": ["10.0.1.0/24"]}] + self.nb_adapter.load_ipaddresses(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_ipaddresses = {(ipaddr.address, ipaddr.namespace) for ipaddr in self.nb_adapter.get_all("ipaddress")} + self.assertEqual( + actual_ipaddresses, + { + ("10.0.1.1", "dev"), + }, + ) + + def test_load_ipaddresses_loads_ips_multiple_filters(self): + sync_filters = [ + {"network_view": "dev", "prefixes_ipv4": ["10.0.0.0/16"]}, + {"network_view": "test", "prefixes_ipv4": ["10.5.0.0/16"]}, + ] + self.nb_adapter.load_ipaddresses(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_ipaddresses = {(ipaddr.address, ipaddr.namespace) for ipaddr in self.nb_adapter.get_all("ipaddress")} + self.assertEqual( + actual_ipaddresses, + { + ("10.0.1.1", "dev"), + ("10.5.1.5", "test"), + }, + ) + + def test_load_ipaddresses_loads_ips_ipv6(self): + 
sync_filters = [{"network_view": "default"}] + self.nb_adapter.load_ipaddresses(include_ipv4=False, include_ipv6=True, sync_filters=sync_filters) + actual_ipaddresses = {(ipaddr.address, ipaddr.namespace) for ipaddr in self.nb_adapter.get_all("ipaddress")} + self.assertEqual( + actual_ipaddresses, + { + ("2001:5b0:4100::1", "Global"), + }, + ) + + def test_load_ipaddresses_loads_ips_ipv4_and_ipv6(self): + sync_filters = [{"network_view": "default"}] + self.nb_adapter.load_ipaddresses(include_ipv4=True, include_ipv6=True, sync_filters=sync_filters) + actual_ipaddresses = {(ipaddr.address, ipaddr.namespace) for ipaddr in self.nb_adapter.get_all("ipaddress")} + self.assertEqual( + actual_ipaddresses, + { + ("10.0.1.1", "Global"), + ("10.0.1.2", "Global"), + ("2001:5b0:4100::1", "Global"), + }, + ) + + def test_load_ipaddresses_load_host_records(self): + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = mock.Mock() + sync_filters = [{"network_view": "default"}] + nb_adapter.load_ipaddresses(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_records = { + (hostr.address, hostr.namespace, hostr.dns_name) for hostr in nb_adapter.get_all("dnshostrecord") + } + self.assertEqual( + actual_records, + { + ("10.0.1.1", "Global", "server1.nautobot.test.com"), + ("10.0.1.2", "Global", "server2.nautobot.test.com"), + }, + ) + + def test_load_ipaddresses_load_a_records(self): + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = mock.Mock() + sync_filters = [{"network_view": "dev"}] + nb_adapter.load_ipaddresses(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_records = { + (hostr.address, hostr.namespace, hostr.dns_name) for hostr in nb_adapter.get_all("dnsarecord") + } + self.assertEqual( + actual_records, + { + ("10.0.1.1", "dev", "server10.nautobot.test.com"), + 
("10.2.1.1", "dev", "server11.nautobot.test.com"), + }, + ) + + def test_load_ipaddresses_load_ptr_records(self): + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = mock.Mock() + sync_filters = [{"network_view": "test"}] + nb_adapter.load_ipaddresses(include_ipv4=True, include_ipv6=False, sync_filters=sync_filters) + actual_records = { + (hostr.address, hostr.namespace, hostr.dns_name) for hostr in nb_adapter.get_all("dnsptrrecord") + } + self.assertEqual( + actual_records, + { + ("10.5.1.5", "test", "server21.nautobot.test.com"), + ("10.2.1.10", "test", "server20.nautobot.test.com"), + }, + ) diff --git a/nautobot_ssot/tests/infoblox/test_nautobot_models.py b/nautobot_ssot/tests/infoblox/test_nautobot_models.py new file mode 100644 index 000000000..79970ba6b --- /dev/null +++ b/nautobot_ssot/tests/infoblox/test_nautobot_models.py @@ -0,0 +1,966 @@ +# pylint: disable=too-many-lines,too-many-public-methods,R0801 +"""Unit tests for the Infoblox Diffsync models.""" +from unittest.mock import Mock + +from django.contrib.contenttypes.models import ContentType +from django.test import TestCase +from nautobot.extras.choices import CustomFieldTypeChoices +from nautobot.extras.models import CustomField, Status, Tag +from nautobot.ipam.models import IPAddress, Namespace, Prefix + +from nautobot_ssot.integrations.infoblox.choices import ( + DNSRecordTypeChoices, + FixedAddressTypeChoices, + NautobotDeletableModelChoices, +) +from nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox import InfobloxAdapter +from nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot import NautobotAdapter +from nautobot_ssot.tests.infoblox.fixtures_infoblox import create_default_infoblox_config, create_prefix_relationship + + +def _get_ip_address_dict(attrs): + """Build dict used for creating diffsync IP address.""" + ipaddress_dict = { + "description": "Test IPAddress", + "address": 
"10.0.0.1", + "status": "Active", + "prefix": "10.0.0.0/8", + "prefix_length": 8, + "ip_addr_type": "host", + "namespace": "dev", + "dns_name": "", + "ext_attrs": {}, + } + ipaddress_dict.update(attrs) + + return ipaddress_dict + + +def _get_dns_a_record_dict(attrs): + """Build dict used for creating diffsync DNS A record.""" + dns_a_record_dict = { + "description": "Test A Record", + "address": "10.0.0.1", + "status": "Active", + "prefix": "10.0.0.0/8", + "prefix_length": 8, + "dns_name": "server1.nautobot.local.net", + "ip_addr_type": "host", + "namespace": "dev", + } + dns_a_record_dict.update(attrs) + + return dns_a_record_dict + + +def _get_dns_ptr_record_dict(attrs): + """Build dict used for creating diffsync DNS PTR record.""" + dns_ptr_record_dict = { + "description": "Test PTR Record", + "address": "10.0.0.1", + "status": "Active", + "prefix": "10.0.0.0/8", + "prefix_length": 8, + "dns_name": "server1.local.test.net", + "ip_addr_type": "host", + "namespace": "dev", + } + dns_ptr_record_dict.update(attrs) + + return dns_ptr_record_dict + + +def _get_dns_host_record_dict(attrs): + """Build dict used for creating diffsync DNS Host record.""" + dns_host_record_dict = { + "description": "Test Host Record", + "address": "10.0.0.1", + "status": "Active", + "prefix": "10.0.0.0/8", + "prefix_length": 8, + "dns_name": "server1.local.test.net", + "ip_addr_type": "host", + "namespace": "dev", + } + dns_host_record_dict.update(attrs) + + return dns_host_record_dict + + +def _get_network_dict(attrs): + """Build dict used for creating diffsync network.""" + network_dict = { + "network": "10.0.0.0/8", + "description": "TestNetwork", + "namespace": "dev", + "status": "Active", + "ext_attrs": {}, + "vlans": {}, + } + network_dict.update(attrs) + + return network_dict + + +class TestModelNautobotNetwork(TestCase): + """Tests correct network record is created.""" + + def setUp(self): + "Test class set up." 
+ create_prefix_relationship() + self.config = create_default_infoblox_config() + self.config.infoblox_sync_filters = [{"network_view": "default"}, {"network_view": "dev"}] + self.namespace_dev, _ = Namespace.objects.get_or_create(name="dev") + self.status_active, _ = Status.objects.get_or_create(name="Active") + self.tag_sync_from_infoblox, _ = Tag.objects.get_or_create(name="SSoT Synced from Infoblox") + self.infoblox_adapter = InfobloxAdapter(conn=Mock(), config=self.config) + inf_ds_namespace = self.infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + self.infoblox_adapter.add(inf_ds_namespace) + inf_ds_namespace = self.infoblox_adapter.namespace( + name="dev", + ext_attrs={}, + ) + self.infoblox_adapter.add(inf_ds_namespace) + + def test_network_create_network(self): + """Validate network gets created.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + prefix = Prefix.objects.get(network="10.0.0.0", prefix_length="8", namespace__name="dev") + + self.assertEqual("10.0.0.0/8", str(prefix.prefix)) + self.assertEqual("dev", prefix.namespace.name) + self.assertEqual("Active", prefix.status.name) + self.assertEqual("TestNetwork", prefix.description) + self.assertEqual("network", prefix.type) + self.assertIn(self.tag_sync_from_infoblox, prefix.tags.all()) + + def test_network_update_network(self): + """Validate network gets updated.""" + inf_network_atrs = { + "network_type": "network", + "namespace": "dev", + "ext_attrs": {"vlan": "10"}, + "description": "New description", + } + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + + Prefix.objects.get_or_create( + 
prefix="10.0.0.0/24", + status=self.status_active, + type="network", + description="Old description", + namespace=self.namespace_dev, + ) + + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + prefix = Prefix.objects.get(network="10.0.0.0", prefix_length="8", namespace__name="dev") + + self.assertEqual("10.0.0.0/8", str(prefix.prefix)) + self.assertEqual("dev", prefix.namespace.name) + self.assertEqual("Active", prefix.status.name) + self.assertEqual("New description", prefix.description) + self.assertEqual("network", prefix.type) + self.assertEqual({"vlan": "10"}, prefix.custom_field_data) + self.assertIn(self.tag_sync_from_infoblox, prefix.tags.all()) + + +class TestModelNautobotIPAddress(TestCase): + """Tests correct IP address record is created or updated.""" + + def setUp(self): + "Test class set up." + create_prefix_relationship() + self.config = create_default_infoblox_config() + self.config.infoblox_sync_filters = [{"network_view": "default"}, {"network_view": "dev"}] + self.namespace_dev, _ = Namespace.objects.get_or_create(name="dev") + self.status_active, _ = Status.objects.get_or_create(name="Active") + self.tag_sync_from_infoblox, _ = Tag.objects.get_or_create(name="SSoT Synced from Infoblox") + self.infoblox_adapter = InfobloxAdapter(conn=Mock(), config=self.config) + inf_ds_namespace = self.infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + self.infoblox_adapter.add(inf_ds_namespace) + inf_ds_namespace = self.infoblox_adapter.namespace( + name="dev", + ext_attrs={}, + ) + self.infoblox_adapter.add(inf_ds_namespace) + + mac_address_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="mac_address", + defaults={ + "label": "MAC Address", + }, + ) + mac_address_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + fixed_address_comment_custom_field, _ = 
CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="fixed_address_comment", + defaults={ + "label": "Fixed Address Comment", + }, + ) + fixed_address_comment_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + dns_a_record_comment_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="dns_a_record_comment", + defaults={ + "label": "DNS A Record Comment", + }, + ) + dns_a_record_comment_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + dns_host_record_comment_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="dns_host_record_comment", + defaults={ + "label": "DNS Host Record Comment", + }, + ) + dns_host_record_comment_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + dns_ptr_record_comment_custom_field, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_TEXT, + key="dns_ptr_record_comment", + defaults={ + "label": "DNS PTR Record Comment", + }, + ) + dns_ptr_record_comment_custom_field.content_types.add(ContentType.objects.get_for_model(IPAddress)) + + def test_ip_address_create_from_fixed_address_reserved(self): + """Validate ip address gets created from Infoblox fixed address reservation.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_fixed_address": True, + "description": "FixedAddressReserved", + "fixed_address_comment": "Created From FA Reserved", + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + + Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + 
description="TestNetwork", + namespace=self.namespace_dev, + ) + + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("FixedAddressReserved", ipaddress.description) + self.assertEqual("dhcp", ipaddress.type) + self.assertEqual("Created From FA Reserved", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertIn(self.tag_sync_from_infoblox, ipaddress.tags.all()) + + def test_ip_address_create_from_fixed_address_mac(self): + """Validate ip address gets created from Infoblox fixed address with mac address.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_fixed_address": True, + "mac_address": "52:1f:83:d4:9a:2e", + "description": "FixedAddressMAC", + "fixed_address_comment": "Created From FA MAC", + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + + Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + 
self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("FixedAddressMAC", ipaddress.description) + self.assertEqual("dhcp", ipaddress.type) + self.assertEqual("52:1f:83:d4:9a:2e", ipaddress.custom_field_data.get("mac_address")) + self.assertEqual("Created From FA MAC", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertIn(self.tag_sync_from_infoblox, ipaddress.tags.all()) + + def test_ip_address_create_from_dns_a_record(self): + """Validate ip address gets created from Infoblox DNS A record.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_arecord_atrs = { + "dns_name": "server1.nautobot.local.net", + "ref": "record:a/xyz", + } + inf_ds_arecord = self.infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + self.infoblox_adapter.add(inf_ds_arecord) + + Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("server1.nautobot.local.net", ipaddress.dns_name) + self.assertEqual("Test A Record", ipaddress.custom_field_data.get("dns_a_record_comment")) + self.assertEqual("", ipaddress.description) + 
self.assertEqual("host", ipaddress.type) + self.assertIn(self.tag_sync_from_infoblox, ipaddress.tags.all()) + + def test_ip_address_create_from_dns_host_record(self): + """Validate ip address gets created from Infoblox DNS Host record.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_hostrecord_atrs = { + "address": "10.0.0.2", + "dns_name": "server1.nautobot.local.net", + "ref": "record:host/xyz", + } + inf_ds_hostrecord = self.infoblox_adapter.dnshostrecord(**_get_dns_host_record_dict(inf_hostrecord_atrs)) + self.infoblox_adapter.add(inf_ds_hostrecord) + + Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + + self.config.fixed_address_type = FixedAddressTypeChoices.DONT_CREATE_RECORD + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.2/8", parent__namespace__name="dev") + self.assertEqual("10.0.0.2/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("server1.nautobot.local.net", ipaddress.dns_name) + self.assertEqual("Test Host Record", ipaddress.custom_field_data.get("dns_host_record_comment")) + self.assertEqual("", ipaddress.description) + self.assertEqual("host", ipaddress.type) + self.assertIn(self.tag_sync_from_infoblox, ipaddress.tags.all()) + + def test_ip_address_create_from_fixed_address_reserved_and_dns_a_record(self): + """Validate ip address gets created from Infoblox Fixed Address MAC and updated with DNS A record data.""" + inf_network_atrs = {"network_type": "network", 
"namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_a_record": True, + "description": "FixedAddressMAC", + "has_fixed_address": True, + "mac_address": "52:1f:83:d4:9a:2e", + "fixed_address_comment": "Created From FA MAC", + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + inf_arecord_atrs = { + "dns_name": "server1.nautobot.local.net", + "ref": "record:a/xyz", + } + inf_ds_arecord = self.infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + self.infoblox_adapter.add(inf_ds_arecord) + + Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("server1.nautobot.local.net", ipaddress.dns_name) + self.assertEqual("FixedAddressMAC", ipaddress.description) + self.assertEqual("dhcp", ipaddress.type) + self.assertEqual("52:1f:83:d4:9a:2e", ipaddress.custom_field_data.get("mac_address")) + self.assertEqual("Created From FA MAC", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertEqual("Test A Record", ipaddress.custom_field_data.get("dns_a_record_comment")) + self.assertIn(self.tag_sync_from_infoblox, ipaddress.tags.all()) + + def 
test_ip_address_create_from_fixed_address_mac_and_dns_a_ptr_records(self): + """Validate ip address gets created from Infoblox Fixed Address MAC and updated with DNS A and PTR records data.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_a_record": True, + "description": "FixedAddressMAC", + "has_fixed_address": True, + "mac_address": "52:1f:83:d4:9a:2e", + "fixed_address_comment": "Created From FA MAC", + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + inf_arecord_atrs = { + "dns_name": "server1.nautobot.local.net", + "ref": "record:a/xyz", + } + inf_ds_arecord = self.infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + self.infoblox_adapter.add(inf_ds_arecord) + inf_ptrrecord_atrs = { + "dns_name": "server1.nautobot.local.net", + "ref": "record:ptr/xyz", + } + inf_ds_ptrrecord = self.infoblox_adapter.dnsptrrecord(**_get_dns_ptr_record_dict(inf_ptrrecord_atrs)) + self.infoblox_adapter.add(inf_ds_ptrrecord) + + Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + 
self.assertEqual("server1.nautobot.local.net", ipaddress.dns_name) + self.assertEqual("FixedAddressMAC", ipaddress.description) + self.assertEqual("dhcp", ipaddress.type) + self.assertEqual("52:1f:83:d4:9a:2e", ipaddress.custom_field_data.get("mac_address")) + self.assertEqual("Created From FA MAC", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertEqual("Test A Record", ipaddress.custom_field_data.get("dns_a_record_comment")) + self.assertEqual("Test PTR Record", ipaddress.custom_field_data.get("dns_ptr_record_comment")) + self.assertIn(self.tag_sync_from_infoblox, ipaddress.tags.all()) + + def test_ip_address_create_from_fixed_address_mac_and_dns_host_record(self): + """Validate ip address gets created from Infoblox Fixed Address MAC and updated with DNS host record data.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_a_record": True, + "description": "FixedAddressMAC", + "has_fixed_address": True, + "mac_address": "52:1f:83:d4:9a:2e", + "fixed_address_comment": "Created From FA MAC", + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + inf_hostrecord_atrs = { + "dns_name": "server1.nautobot.local.net", + "ref": "record:host/xyz", + } + inf_ds_hostrecord = self.infoblox_adapter.dnshostrecord(**_get_dns_host_record_dict(inf_hostrecord_atrs)) + self.infoblox_adapter.add(inf_ds_hostrecord) + + Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + nb_adapter = NautobotAdapter(config=self.config) + 
nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("server1.nautobot.local.net", ipaddress.dns_name) + self.assertEqual("FixedAddressMAC", ipaddress.description) + self.assertEqual("dhcp", ipaddress.type) + self.assertEqual("52:1f:83:d4:9a:2e", ipaddress.custom_field_data.get("mac_address")) + self.assertEqual("Created From FA MAC", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertEqual("Test Host Record", ipaddress.custom_field_data.get("dns_host_record_comment")) + self.assertIn(self.tag_sync_from_infoblox, ipaddress.tags.all()) + + ############ + # IP Address updates + ########### + + def test_ip_address_update_from_fixed_address_reserved(self): + """Validate ip address gets updated from Infoblox fixed address reservation.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_fixed_address": True, + "description": "FixedAddressReserved", + "fixed_address_comment": "Created From FA Reserved", + "ext_attrs": {"gateway": "10.0.0.254"}, + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + + parent_pfx, _ = Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + IPAddress.objects.get_or_create( + address="10.0.0.1/8", + status=self.status_active, + type="host", + description="OldDescription", + parent=parent_pfx, + ) + + 
self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("FixedAddressReserved", ipaddress.description) + self.assertEqual("dhcp", ipaddress.type) + self.assertEqual("Created From FA Reserved", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertEqual("10.0.0.254", ipaddress.custom_field_data.get("gateway")) + + def test_ip_address_update_address_from_fixed_address_mac(self): + """Validate ip address gets created from Infoblox fixed address with mac address.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_fixed_address": True, + "mac_address": "52:1f:83:d4:9a:2e", + "description": "FixedAddressMAC", + "fixed_address_comment": "Created From FA MAC", + "ext_attrs": {"gateway": "10.0.0.254"}, + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + + parent_pfx, _ = Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + IPAddress.objects.get_or_create( + address="10.0.0.1/8", + status=self.status_active, + type="host", + parent=parent_pfx, + defaults={ + "description": "OldDescription", + "_custom_field_data": {"mac_address": "52:1f:83:d4:9a:2a"}, + }, + ) + + self.config.fixed_address_type = 
FixedAddressTypeChoices.MAC_ADDRESS + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("FixedAddressMAC", ipaddress.description) + self.assertEqual("dhcp", ipaddress.type) + self.assertEqual("52:1f:83:d4:9a:2e", ipaddress.custom_field_data.get("mac_address")) + self.assertEqual("Created From FA MAC", ipaddress.custom_field_data.get("fixed_address_comment")) + + def test_ip_address_update_address_from_dns_a_record(self): + """Validate ip address gets created from Infoblox DNS A record.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_arecord_atrs = { + "dns_name": "server1.nautobot.local.net", + "ref": "record:a/xyz", + } + inf_ds_arecord = self.infoblox_adapter.dnsarecord(**_get_dns_a_record_dict(inf_arecord_atrs)) + self.infoblox_adapter.add(inf_ds_arecord) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_fixed_address": True, + "description": "FixedAddressReserved", + "fixed_address_comment": "Created From FA Reserved", + "ext_attrs": {"gateway": "10.0.0.254"}, + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + + parent_pfx, _ = Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + IPAddress.objects.get_or_create( + address="10.0.0.1/8", + status=self.status_active, + type="host", + parent=parent_pfx, + defaults={ + "dns_name": 
"server.nautobot.local.net", + "description": "OldDescription", + "_custom_field_data": { + "mac_address": "52:1f:83:d4:9a:2a", + "fixed_address_comment": "Old FA comment", + "dns_a_record_comment": "Old A record comment", + }, + }, + ) + + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("FixedAddressReserved", ipaddress.description) + self.assertEqual("server1.nautobot.local.net", ipaddress.dns_name) + self.assertEqual("Created From FA Reserved", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertEqual("Test A Record", ipaddress.custom_field_data.get("dns_a_record_comment")) + self.assertEqual("dhcp", ipaddress.type) + + ############ + # IP Address deletes + ########### + + def test_ip_address_delete_fail(self): + """Validate ip address is not deleted if object deletion is not enabled in the config.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + + parent_pfx, _ = Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + IPAddress.objects.get_or_create( + address="10.0.0.1/8", + status=self.status_active, + type="dhcp", + parent=parent_pfx, + defaults={ + "description": "OldDescription", + "_custom_field_data": { + "mac_address": "52:1f:83:d4:9a:2a", + "fixed_address_comment": "Old FA 
comment", + }, + }, + ) + + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.nautobot_deletable_models = [] + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("OldDescription", ipaddress.description) + self.assertEqual("dhcp", ipaddress.type) + self.assertEqual("Old FA comment", ipaddress.custom_field_data.get("fixed_address_comment")) + + def test_ip_address_delete_success(self): + """Validate ip address is deleted if object deletion is enabled in the config.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + + parent_pfx, _ = Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + IPAddress.objects.get_or_create( + address="10.0.0.1/8", + status=self.status_active, + type="host", + description="OldDescription", + parent=parent_pfx, + ) + + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.nautobot_deletable_models = [NautobotDeletableModelChoices.IP_ADDRESS] + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + with self.assertRaises(IPAddress.DoesNotExist): + IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + def test_ip_address_delete_a_record(self): + """Validate A record data for ip address is deleted if object deletion is enabled in the config.""" + 
inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_fixed_address": True, + "description": "FixedAddressReserved", + "fixed_address_comment": "Created From FA Reserved", + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + + parent_pfx, _ = Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + IPAddress.objects.get_or_create( + address="10.0.0.1/8", + status=self.status_active, + type="dhcp", + parent=parent_pfx, + defaults={ + "description": "FixedAddressReserved", + "dns_name": "server1.nautobot.local.net", + "_custom_field_data": { + "fixed_address_comment": "Created From FA Reserved", + "dns_a_record_comment": "Created From A Record", + }, + }, + ) + + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + self.config.nautobot_deletable_models = [NautobotDeletableModelChoices.DNS_A_RECORD] + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("FixedAddressReserved", ipaddress.description) + self.assertEqual("", ipaddress.dns_name) + self.assertEqual("Created From FA Reserved", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertEqual("", ipaddress.custom_field_data.get("dns_a_record_comment")) + 
self.assertEqual("dhcp", ipaddress.type) + + def test_ip_address_delete_host_record(self): + """Validate Host record data for ip address is deleted if object deletion is enabled in the config.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_fixed_address": True, + "description": "FixedAddressReserved", + "fixed_address_comment": "Created From FA Reserved", + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + + parent_pfx, _ = Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + IPAddress.objects.get_or_create( + address="10.0.0.1/8", + status=self.status_active, + type="dhcp", + parent=parent_pfx, + defaults={ + "description": "FixedAddressReserved", + "dns_name": "server1.nautobot.local.net", + "_custom_field_data": { + "fixed_address_comment": "Created From FA Reserved", + "dns_host_record_comment": "Created From Host Record", + }, + }, + ) + + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.dns_record_type = DNSRecordTypeChoices.HOST_RECORD + self.config.nautobot_deletable_models = [NautobotDeletableModelChoices.DNS_HOST_RECORD] + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("FixedAddressReserved", ipaddress.description) + self.assertEqual("", 
ipaddress.dns_name) + self.assertEqual("Created From FA Reserved", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertEqual("", ipaddress.custom_field_data.get("dns_host_record_comment")) + self.assertEqual("dhcp", ipaddress.type) + + def test_ip_address_delete_a_ptr_records(self): + """Validate A and PTR record data for ip address is deleted if object deletion is enabled in the config.""" + inf_network_atrs = {"network_type": "network", "namespace": "dev"} + inf_ds_network = self.infoblox_adapter.prefix(**_get_network_dict(inf_network_atrs)) + self.infoblox_adapter.add(inf_ds_network) + inf_address_atrs = { + "ip_addr_type": "dhcp", + "has_fixed_address": True, + "description": "FixedAddressReserved", + "fixed_address_comment": "Created From FA Reserved", + } + inf_ds_ipaddress = self.infoblox_adapter.ipaddress(**_get_ip_address_dict(inf_address_atrs)) + self.infoblox_adapter.add(inf_ds_ipaddress) + + parent_pfx, _ = Prefix.objects.get_or_create( + prefix="10.0.0.0/8", + status=self.status_active, + type="network", + description="TestNetwork", + namespace=self.namespace_dev, + ) + IPAddress.objects.get_or_create( + address="10.0.0.1/8", + status=self.status_active, + type="dhcp", + parent=parent_pfx, + defaults={ + "description": "FixedAddressReserved", + "dns_name": "server1.nautobot.local.net", + "_custom_field_data": { + "fixed_address_comment": "Created From FA Reserved", + "dns_a_record_comment": "Created From A Record", + "dns_ptr_record_comment": "Created From PTR Record", + }, + }, + ) + + self.config.fixed_address_type = FixedAddressTypeChoices.RESERVED + self.config.dns_record_type = DNSRecordTypeChoices.A_AND_PTR_RECORD + self.config.nautobot_deletable_models = [ + NautobotDeletableModelChoices.DNS_A_RECORD, + NautobotDeletableModelChoices.DNS_PTR_RECORD, + ] + nb_adapter = NautobotAdapter(config=self.config) + nb_adapter.job = Mock() + nb_adapter.load() + self.infoblox_adapter.sync_to(nb_adapter) + + ipaddress = 
IPAddress.objects.get(address="10.0.0.1/8", parent__namespace__name="dev") + + self.assertEqual("10.0.0.1/8", str(ipaddress.address)) + self.assertEqual("dev", ipaddress.parent.namespace.name) + self.assertEqual("Active", ipaddress.status.name) + self.assertEqual("FixedAddressReserved", ipaddress.description) + self.assertEqual("", ipaddress.dns_name) + self.assertEqual("Created From FA Reserved", ipaddress.custom_field_data.get("fixed_address_comment")) + self.assertEqual("", ipaddress.custom_field_data.get("dns_a_record_comment")) + self.assertEqual("", ipaddress.custom_field_data.get("dns_ptr_record_comment")) + self.assertEqual("dhcp", ipaddress.type) diff --git a/nautobot_ssot/tests/infoblox/test_tags_and_cfs.py b/nautobot_ssot/tests/infoblox/test_tags_and_cfs.py index e4dd93fc4..f4113c490 100644 --- a/nautobot_ssot/tests/infoblox/test_tags_and_cfs.py +++ b/nautobot_ssot/tests/infoblox/test_tags_and_cfs.py @@ -5,11 +5,16 @@ from django.contrib.contenttypes.models import ContentType from django.test import TestCase +from nautobot.extras.choices import CustomFieldTypeChoices from nautobot.extras.models import CustomField, Status, Tag -from nautobot.ipam.models import VLAN, IPAddress, Prefix, VLANGroup +from nautobot.ipam.models import VLAN, IPAddress, Namespace, Prefix, VLANGroup +from nautobot_ssot.integrations.infoblox.choices import DNSRecordTypeChoices, FixedAddressTypeChoices from nautobot_ssot.integrations.infoblox.diffsync.adapters.infoblox import InfobloxAdapter from nautobot_ssot.integrations.infoblox.diffsync.adapters.nautobot import NautobotAdapter +from nautobot_ssot.tests.infoblox.fixtures_infoblox import create_prefix_relationship + +from .fixtures_infoblox import create_default_infoblox_config class TestTagging(TestCase): @@ -17,45 +22,83 @@ class TestTagging(TestCase): def setUp(self): "Test class set up." 
- self.tag_sync_from_infoblox = Tag.objects.get(name="SSoT Synced from Infoblox") - self.tag_sync_to_infoblox = Tag.objects.get(name="SSoT Synced to Infoblox") + self.tag_sync_from_infoblox, _ = Tag.objects.get_or_create( + name="SSoT Synced from Infoblox", + defaults={ + "name": "SSoT Synced from Infoblox", + "description": "Object synced at some point from Infoblox", + "color": "40bfae", + }, + ) + for model in [IPAddress, Namespace, Prefix, VLAN]: + self.tag_sync_from_infoblox.content_types.add(ContentType.objects.get_for_model(model)) + self.tag_sync_to_infoblox, _ = Tag.objects.get_or_create( + name="SSoT Synced to Infoblox", + defaults={ + "name": "SSoT Synced to Infoblox", + "description": "Object synced at some point to Infoblox", + "color": "40bfae", + }, + ) + for model in [IPAddress, Prefix, VLAN]: + self.tag_sync_to_infoblox.content_types.add(ContentType.objects.get_for_model(model)) + self.config = create_default_infoblox_config() def test_tags_have_correct_content_types_set(self): """Ensure tags have correct content types configured.""" for model in (IPAddress, Prefix, VLAN): content_type = ContentType.objects.get_for_model(model) - self.assertIn(content_type, self.tag_sync_from_infoblox.content_types.all()) self.assertIn(content_type, self.tag_sync_to_infoblox.content_types.all()) + for model in (IPAddress, Namespace, Prefix, VLAN): + content_type = ContentType.objects.get_for_model(model) + self.assertIn(content_type, self.tag_sync_from_infoblox.content_types.all()) + def test_objects_synced_from_infoblox_are_tagged(self): """Ensure objects synced from Infoblox have 'SSoT Synced from Infoblox' tag applied.""" - nb_diffsync = NautobotAdapter() - nb_diffsync.job = Mock() - nb_diffsync.load() + self.config.dns_record_type = DNSRecordTypeChoices.A_RECORD + self.config.fixed_address_type = FixedAddressTypeChoices.MAC_ADDRESS + nautobot_adapter = NautobotAdapter(config=self.config) + nautobot_adapter.job = Mock() + nautobot_adapter.load() + + 
Namespace.objects.get_or_create(name="Global") - infoblox_adapter = InfobloxAdapter(conn=Mock()) + infoblox_adapter = InfobloxAdapter(conn=Mock(), config=self.config) + ds_namespace_global = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(ds_namespace_global) + ds_namespace_dev = infoblox_adapter.namespace( + name="dev", + ext_attrs={}, + ) + infoblox_adapter.add(ds_namespace_dev) ds_prefix = infoblox_adapter.prefix( network="10.0.0.0/8", description="Test Network", network_type="network", + ext_attrs={}, + vlans={}, status="Active", + namespace="Global", ) infoblox_adapter.add(ds_prefix) ds_ipaddress = infoblox_adapter.ipaddress( description="Test IPAddress", address="10.0.0.1", status="Active", - dns_name="", + has_fixed_address=True, prefix="10.0.0.0/8", prefix_length=8, - ip_addr_type="host", + ip_addr_type="dhcp", + ext_attrs={}, + namespace="Global", ) infoblox_adapter.add(ds_ipaddress) - ds_vlangroup = infoblox_adapter.vlangroup( - name="TestVLANGroup", - description="", - ) + ds_vlangroup = infoblox_adapter.vlangroup(name="TestVLANGroup", description="", ext_attrs={}) infoblox_adapter.add(ds_vlangroup) ds_vlan = infoblox_adapter.vlan( vid=750, @@ -66,8 +109,10 @@ def test_objects_synced_from_infoblox_are_tagged(self): ext_attrs={}, ) infoblox_adapter.add(ds_vlan) + infoblox_adapter.sync_to(nautobot_adapter) - nb_diffsync.sync_from(infoblox_adapter) + namespace = Namespace.objects.get(name="dev") + self.assertEqual(namespace.tags.all()[0], self.tag_sync_from_infoblox) prefix = Prefix.objects.get(network="10.0.0.0", prefix_length="8") self.assertEqual(prefix.tags.all()[0], self.tag_sync_from_infoblox) @@ -80,6 +125,7 @@ def test_objects_synced_from_infoblox_are_tagged(self): def test_objects_synced_to_infoblox_are_tagged(self): """Ensure objects synced to Infoblox have 'SSoT Synced to Infoblox' tag applied.""" + create_prefix_relationship() nb_prefix = Prefix( network="10.0.0.0", prefix_length=8, @@ -108,12 +154,17 @@ 
def test_objects_synced_to_infoblox_are_tagged(self): ) nb_vlan.validated_save() - nautobot_adapter = NautobotAdapter() + nautobot_adapter = NautobotAdapter(config=self.config) nautobot_adapter.job = Mock() nautobot_adapter.load() - infoblox_adapter = InfobloxAdapter(conn=Mock()) + infoblox_adapter = InfobloxAdapter(conn=Mock(), config=self.config) infoblox_adapter.job = Mock() + ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(ds_namespace) nautobot_adapter.sync_to(infoblox_adapter) prefix = Prefix.objects.get(network="10.0.0.0", prefix_length="8") @@ -132,7 +183,17 @@ class TestCustomFields(TestCase): def setUp(self): """Test class set up.""" self.today = datetime.date.today().isoformat() - self.cf_synced_to_infoblox = CustomField.objects.get(key="ssot_synced_to_infoblox") + self.cf_synced_to_infoblox, _ = CustomField.objects.get_or_create( + type=CustomFieldTypeChoices.TYPE_DATE, + key="ssot_synced_to_infoblox", + defaults={ + "label": "Last synced to Infoblox on", + }, + ) + for model in [IPAddress, Prefix, VLAN, VLANGroup]: + self.cf_synced_to_infoblox.content_types.add(ContentType.objects.get_for_model(model)) + create_prefix_relationship() + self.config = create_default_infoblox_config() def test_cfs_have_correct_content_types_set(self): """Ensure cfs have correct content types configured.""" @@ -172,13 +233,20 @@ def test_cf_updated_for_objects_synced_to_infoblox(self): ) nb_vlan.validated_save() - nautobot_adapter = NautobotAdapter() + nautobot_adapter = NautobotAdapter(config=self.config) nautobot_adapter.job = Mock() nautobot_adapter.load() conn = Mock() - infoblox_adapter = InfobloxAdapter(conn=conn) + infoblox_adapter = InfobloxAdapter(conn=conn, config=self.config) infoblox_adapter.job = Mock() + + ds_namespace = infoblox_adapter.namespace( + name="Global", + ext_attrs={}, + ) + infoblox_adapter.add(ds_namespace) + nautobot_adapter.sync_to(infoblox_adapter) prefix = 
Prefix.objects.get(network="10.0.0.0", prefix_length="8") diff --git a/nautobot_ssot/tests/infoblox/test_utils.py b/nautobot_ssot/tests/infoblox/test_utils.py index fde91382e..27f9b2768 100644 --- a/nautobot_ssot/tests/infoblox/test_utils.py +++ b/nautobot_ssot/tests/infoblox/test_utils.py @@ -1,16 +1,20 @@ """Util tests that do not require Django.""" import unittest +import unittest.mock from django.test import TestCase - from nautobot.extras.models import Status from nautobot.ipam.models import VLAN, VLANGroup from nautobot_ssot.integrations.infoblox.utils.diffsync import ( + get_default_custom_fields, + get_ext_attr_dict, + get_valid_custom_fields, get_vlan_view_name, + map_network_view_to_namespace, nautobot_vlan_status, - get_ext_attr_dict, + validate_dns_name, ) from nautobot_ssot.integrations.infoblox.utils.nautobot import build_vlan_map_from_relations @@ -37,6 +41,114 @@ def test_get_ext_attr_dict(self): standardized_dict = get_ext_attr_dict(test_dict) self.assertEqual(standardized_dict, expected) + def test_get_ext_attr_dict_slugify(self): + """Test get_ext_attr_dict slugifies keys.""" + test_dict = {"Site-Loc": {"value": "NTC"}, "Region": {"value": "Central"}} + expected = {"site_loc": "NTC", "region": "Central"} + standardized_dict = get_ext_attr_dict(test_dict) + self.assertEqual(standardized_dict, expected) + + def test_get_ext_attr_dict_exclusion_list(self): + """Test get_ext_attr_dict correctly excludes attributes.""" + test_dict = {"Site": {"value": "HQ"}, "Region": {"value": "Central"}, "Tenant": {"value": "NTC"}} + excluded_attrs = ["Tenant"] + expected = {"site": "HQ", "region": "Central"} + standardized_dict = get_ext_attr_dict(extattrs=test_dict, excluded_attrs=excluded_attrs) + self.assertEqual(standardized_dict, expected) + + def test_validate_dns_name(self): + """Test validate_dns_name.""" + client = unittest.mock.Mock() + client.get_dns_view_for_network_view = unittest.mock.Mock(return_value="default.dev") + 
client.get_authoritative_zones_for_dns_view = unittest.mock.Mock( + return_value=[ + { + "fqdn": "nautobot.local.dev", + }, + { + "fqdn": "nautobot.local.test", + }, + ] + ) + + valid_name = "server1.nautobot.local.dev" + invalid_name = "server1.nautobot.local.prod" + + self.assertEqual(False, validate_dns_name(client, invalid_name, "dev")) + self.assertEqual(True, validate_dns_name(client, valid_name, "dev")) + + def test_map_network_view_to_namespace(self): + """Test map_network_view_to_namespace.""" + network_view1 = "dev" + network_view2 = "default" + + namespace1 = "test" + namespace2 = "Global" + + self.assertEqual("dev", map_network_view_to_namespace(value=network_view1, direction="nv_to_ns")) + self.assertEqual("Global", map_network_view_to_namespace(value=network_view2, direction="nv_to_ns")) + self.assertEqual("test", map_network_view_to_namespace(value=namespace1, direction="ns_to_nv")) + self.assertEqual("default", map_network_view_to_namespace(value=namespace2, direction="ns_to_nv")) + + def test_get_valid_custom_fields(self): + """Test get_valid_custom_fields.""" + excluded_cfs = ["synced_to_snow"] + + cfs1 = {"ssot_synced_to_infoblox": True, "dhcp_ranges": [], "mac_address": "", "vlan": 100} + cfs2 = {"tenant": "NTC", "synced_to_snow": True} + + expected1 = {"vlan": 100} + expected2 = {"tenant": "NTC"} + + self.assertEqual(expected1, get_valid_custom_fields(cfs=cfs1)) + self.assertEqual(expected2, get_valid_custom_fields(cfs=cfs2, excluded_cfs=excluded_cfs)) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.diffsync.CustomField", + autospec=True, + ) + def test_get_default_custom_fields(self, custom_field): + """Test get_default_custom_fields.""" + content_type = unittest.mock.Mock() + cf1 = unittest.mock.Mock() + cf2 = unittest.mock.Mock() + cf_def_excl1 = unittest.mock.Mock() + cf_def_excl2 = unittest.mock.Mock() + cf1.key = "tenant" + cf2.key = "site" + cf_def_excl1.key = "ssot_synced_to_infoblox" + cf_def_excl2.key = 
"dhcp_ranges" + + custom_field.objects.filter.return_value = [cf1, cf2, cf_def_excl1, cf_def_excl2] + + expected = {"tenant": None, "site": None} + + result = get_default_custom_fields(cf_contenttype=content_type) + self.assertEqual(expected, result) + + @unittest.mock.patch( + "nautobot_ssot.integrations.infoblox.utils.diffsync.CustomField", + autospec=True, + ) + def test_get_default_custom_fields_excluded(self, custom_field): + """Test get_default_custom_fields with excluded cfs.""" + content_type = unittest.mock.Mock() + cf1 = unittest.mock.Mock() + cf2 = unittest.mock.Mock() + cf3 = unittest.mock.Mock() + cf4 = unittest.mock.Mock() + cf1.key = "tenant" + cf2.key = "site" + cf3.key = "snow_synced" + cf4.key = "vlan" + excluded_cfs = ["snow_synced", "vlan"] + custom_field.objects.filter.return_value = [cf1, cf2, cf3, cf4] + + expected = {"tenant": None, "site": None} + + result = get_default_custom_fields(cf_contenttype=content_type, excluded_cfs=excluded_cfs) + self.assertEqual(expected, result) + class TestNautobotUtils(TestCase): """Test infoblox.utils.nautobot.py.""" diff --git a/nautobot_ssot/tests/itential/__init__.py b/nautobot_ssot/tests/itential/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nautobot_ssot/tests/itential/fixtures/__init__.py b/nautobot_ssot/tests/itential/fixtures/__init__.py new file mode 100644 index 000000000..ab70fe894 --- /dev/null +++ b/nautobot_ssot/tests/itential/fixtures/__init__.py @@ -0,0 +1 @@ +"""Itential SSoT Fixtures.""" diff --git a/nautobot_ssot/tests/itential/fixtures/base.py b/nautobot_ssot/tests/itential/fixtures/base.py new file mode 100644 index 000000000..623f854ec --- /dev/null +++ b/nautobot_ssot/tests/itential/fixtures/base.py @@ -0,0 +1,160 @@ +"""Itential SSoT Base TestCase.""" + +import os +import unittest +import requests_mock + +# from unittest import TestCase + +from nautobot.apps.testing import TestCase +from nautobot.apps.testing import TransactionTestCase + +from 
nautobot.extras.models import Status + +from nautobot_ssot.integrations.itential.models import AutomationGatewayModel +from nautobot_ssot.integrations.itential.diffsync.adapters import itential, nautobot +from nautobot_ssot.tests.itential.fixtures import gateways, urls, clients, devices + + +class ItentialSSoTBaseTestCase(TestCase): + """Itential Automation Gateway Client Test Cases.""" + + def setUp(self): + """Setup test cases.""" + self.job = unittest.mock.MagicMock() + self.requests_mock = requests_mock.Mocker() + self.requests_mock.start() + + for device in gateways.gateways: + os.environ[device.get("username_env")] = "testUser" + os.environ[device.get("password_env")] = "testPass" + os.environ[device.get("ansible_vault_env")] = "testAnsibleVaultKey" + os.environ[device.get("device_user_env")] = "testDeviceUser" + os.environ[device.get("device_pass_env")] = "testDevicePass" + + gateways.update_or_create_automation_gateways( + name=device.get("name"), + description=device.get("description"), + location=device.get("location"), + region=device.get("region"), + gateway=device.get("gateway"), + enabled=device.get("enabled"), + username_env=device.get("username_env"), + password_env=device.get("password_env"), + ansible_vault_env=device.get("ansible_vault_env"), + device_user_env=device.get("device_user_env"), + device_pass_env=device.get("device_pass_env"), + secret_group=device.get("secret_group"), + ) + + for url_item in urls.data: + self.requests_mock.register_uri( + method=url_item.get("method"), + url=url_item.get("url"), + json=url_item.get("json"), + status_code=url_item.get("status_code", 200), + headers=url_item.get("headers", {}), + cookies=url_item.get("cookies", {}), + ) + + for device in devices.data: + devices.update_or_create_device_object( + status=device.get("status"), + role=device.get("role"), + name=device.get("name"), + location=device.get("location"), + manufacturer=device.get("manufacturer"), + platform=device.get("platform"), + 
network_driver=device.get("network_driver"), + model=device.get("model"), + interface=device.get("interface"), + ip_address=device.get("ip_address"), + config_context=device.get("config_context"), + ) + + self.status, _ = Status.objects.get_or_create(name="Active") + self.gateway = AutomationGatewayModel.objects.first() + self.client = clients.api_client(self.gateway) + self.itential_adapter = itential.ItentialAnsibleDeviceAdapter(api_client=self.client, job=self.job, sync=None) + self.nautobot_adapter = nautobot.NautobotAnsibleDeviceAdapter( + job=self.job, gateway=self.gateway, status=self.status, sync=None + ) + + self.itential_adapter.load() + self.nautobot_adapter.load() + + def tearDown(self): + """Teardown test cases.""" + self.requests_mock.stop() + + +class ItentialSSoTBaseTransactionTestCase(TransactionTestCase): + """Itential Automation Gateway Client Test Cases.""" + + def setUp(self): + """Setup test cases.""" + self.job = unittest.mock.MagicMock() + self.requests_mock = requests_mock.Mocker() + self.requests_mock.start() + + for device in gateways.gateways: + os.environ[device.get("username_env")] = "testUser" + os.environ[device.get("password_env")] = "testPass" + os.environ[device.get("ansible_vault_env")] = "testAnsibleVaultKey" + os.environ[device.get("device_user_env")] = "testDeviceUser" + os.environ[device.get("device_pass_env")] = "testDevicePass" + + gateways.update_or_create_automation_gateways( + name=device.get("name"), + description=device.get("description"), + location=device.get("location"), + region=device.get("region"), + gateway=device.get("gateway"), + enabled=device.get("enabled"), + username_env=device.get("username_env"), + password_env=device.get("password_env"), + ansible_vault_env=device.get("ansible_vault_env"), + device_user_env=device.get("device_user_env"), + device_pass_env=device.get("device_pass_env"), + secret_group=device.get("secret_group"), + ) + + for url_item in urls.data: + self.requests_mock.register_uri( + 
method=url_item.get("method"), + url=url_item.get("url"), + json=url_item.get("json"), + status_code=url_item.get("status_code", 200), + headers=url_item.get("headers", {}), + cookies=url_item.get("cookies", {}), + ) + + for device in devices.data: + devices.update_or_create_device_object( + status=device.get("status"), + role=device.get("role"), + name=device.get("name"), + location=device.get("location"), + manufacturer=device.get("manufacturer"), + platform=device.get("platform"), + network_driver=device.get("network_driver"), + model=device.get("model"), + interface=device.get("interface"), + ip_address=device.get("ip_address"), + config_context=device.get("config_context"), + ) + + self.status, _ = Status.objects.get_or_create(name="Active") + self.gateway = AutomationGatewayModel.objects.first() + self.client = clients.api_client(self.gateway) + self.itential_adapter = itential.ItentialAnsibleDeviceAdapter(api_client=self.client, job=self.job, sync=None) + self.nautobot_adapter = nautobot.NautobotAnsibleDeviceAdapter( + job=self.job, gateway=self.gateway, status=self.status, sync=None + ) + + self.itential_adapter.load() + self.nautobot_adapter.load() + + def tearDown(self): + """Teardown test cases.""" + self.requests_mock.stop() diff --git a/nautobot_ssot/tests/itential/fixtures/clients.py b/nautobot_ssot/tests/itential/fixtures/clients.py new file mode 100644 index 000000000..9698590b5 --- /dev/null +++ b/nautobot_ssot/tests/itential/fixtures/clients.py @@ -0,0 +1,24 @@ +"""Itential SSoT API Clients fixtures.""" + +import unittest + +from nautobot.extras.choices import SecretsGroupAccessTypeChoices, SecretsGroupSecretTypeChoices + +from nautobot_ssot.integrations.itential.models import AutomationGatewayModel +from nautobot_ssot.integrations.itential.clients import AutomationGatewayClient + + +def api_client(device_obj: AutomationGatewayModel, job: object = unittest.mock.MagicMock()) -> AutomationGatewayClient: + """Initialize API Client.""" + + return 
AutomationGatewayClient( + host=device_obj.gateway.remote_url, + username=device_obj.gateway.secrets_group.get_secret_value( + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME + ), + password=device_obj.gateway.secrets_group.get_secret_value( + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD + ), + job=job, + verify_ssl=device_obj.gateway.verify_ssl, + ) diff --git a/nautobot_ssot/tests/itential/fixtures/devices.py b/nautobot_ssot/tests/itential/fixtures/devices.py new file mode 100644 index 000000000..fc007666b --- /dev/null +++ b/nautobot_ssot/tests/itential/fixtures/devices.py @@ -0,0 +1,118 @@ +"""Itential SsoT Nautobot device fixtures.""" + +from django.contrib.contenttypes.models import ContentType + +from nautobot.dcim.models import ( + Location, + LocationType, + Manufacturer, + Platform, + Device, + DeviceType, + Interface, +) +from nautobot.extras.models import Status, Role +from nautobot.ipam.models import Prefix, IPAddress, Namespace + + +data = [ + { + "name": "rtr1.example.net", + "location": "North America", + "manufacturer": "Cisco", + "model": "Cisco 2901", + "interface": "gigabitEthernet0/1", + "ip_address": "192.0.2.1", + "platform": "Cisco IOS", + "network_driver": "cisco_ios", + "role": "Router", + "status": "Active", + "config_context": {"ansible_port": 22, "ansible_connection": "ansible.netcommon.network_cli"}, + }, + { + "name": "rtr2.example.net", + "location": "North America", + "manufacturer": "Cisco", + "model": "Cisco 2901", + "interface": "gigabitEthernet0/1", + "ip_address": None, + "platform": "Cisco IOS", + "network_driver": "cisco_ios", + "role": "Router", + "status": "Active", + }, + { + "name": "rtr10.example.net", + "location": "North America", + "manufacturer": "Cisco", + "model": "Cisco 2901", + "interface": "gigabitEthernet0/1", + "ip_address": "192.0.2.10", + "platform": "Cisco IOS", + "network_driver": 
"cisco_ios", + "role": "Router", + "status": "Active", + }, + { + "name": "rtr11.example.net", + "location": "North America", + "manufacturer": "Cisco", + "model": "NCS 5501", + "interface": "managementEthernet0/0/0/1", + "ip_address": "192.0.2.11", + "platform": "Cisco IOS-XR", + "network_driver": "cisco_xr", + "role": "Router", + "status": "Active", + }, +] + + +def add_content_type(model: object, content_type: object, changed: bool): + """Add a content type to a model.""" + + if changed: + model.content_types.add(content_type) + + model.save() + + +def update_or_create_device_object( + status: str, + role: str, + name: str, + location: str, + manufacturer: str, + platform: str, + network_driver: str, + model: str, + interface: str, + ip_address: str, + config_context: dict = {}, +): # pylint: disable=dangerous-default-value,too-many-arguments,too-many-locals + """Create or update device fixtures.""" + status, _ = Status.objects.get_or_create(name=status) + namespace, _ = Namespace.objects.get_or_create(name="Global") + Prefix.objects.update_or_create(prefix="192.0.2.0/24", namespace=namespace, status=status) + device_content_type = ContentType.objects.get_for_model(Device) + role, role_changed = Role.objects.update_or_create(name=role) + add_content_type(model=role, content_type=device_content_type, changed=role_changed) + location_type, location_type_changed = LocationType.objects.get_or_create(name="Region") + add_content_type(model=location_type, content_type=device_content_type, changed=location_type_changed) + location, _ = Location.objects.get_or_create(name=location, location_type=location_type, status=status) + manufacturer, _ = Manufacturer.objects.update_or_create(name=manufacturer) + platform, _ = Platform.objects.update_or_create( + name=platform, manufacturer=manufacturer, network_driver=network_driver + ) + device_type, _ = DeviceType.objects.update_or_create(manufacturer=manufacturer, model=model) + device, _ = Device.objects.update_or_create( + 
name=name, role=role, device_type=device_type, location=location, status=status, platform=platform + ) + interface, _ = Interface.objects.update_or_create(name=interface, status=status, device=device) + + if ip_address: + ip_address, _ = IPAddress.objects.update_or_create(host=ip_address, mask_length=32, status=status) + ip_address.primary_ip4_for.add(device) + + device.local_config_context_data = config_context + device.save() diff --git a/nautobot_ssot/tests/itential/fixtures/gateways.py b/nautobot_ssot/tests/itential/fixtures/gateways.py new file mode 100644 index 000000000..78926179c --- /dev/null +++ b/nautobot_ssot/tests/itential/fixtures/gateways.py @@ -0,0 +1,248 @@ +"""Itential Automation Gateway Fixtures.""" + +from nautobot.extras.models import Secret, SecretsGroup, SecretsGroupAssociation, ExternalIntegration, Status +from nautobot.extras.choices import SecretsGroupAccessTypeChoices, SecretsGroupSecretTypeChoices +from nautobot.dcim.models import LocationType, Location + +from nautobot_ssot.integrations.itential.models import AutomationGatewayModel + +gateways = [ + { + "name": "IAG1", + "description": "Test IAG 1", + "region": "North America", + "gateway": "https://iag1.example.com:8443", + "enabled": True, + "username_env": "IAG1_USERNAME", + "password_env": "IAG1_PASSWORD", + "ansible_vault_env": "IAG1_VAULT", + "device_user_env": "IAG1_DEVICE_USER", + "device_pass_env": "IAG1_DEVICE_PASS", + "secret_group": "testGroup1", + }, + { + "name": "IAG10", + "description": "Test IAG 10", + "region": "North America", + "gateway": "https://iag10.example.com:8443", + "enabled": False, + "username_env": "IAG1_USERNAME", + "password_env": "IAG1_PASSWORD", + "ansible_vault_env": "IAG1_VAULT", + "device_user_env": "IAG1_DEVICE_USER", + "device_pass_env": "IAG1_DEVICE_PASS", + "secret_group": "testGroup1", + }, + { + "name": "IAG2", + "description": "Test IAG 2", + "region": "Europe", + "gateway": "https://iag2.example.com:8443", + "enabled": True, + 
"username_env": "IAG2_USERNAME", + "password_env": "IAG2_PASSWORD", + "ansible_vault_env": "IAG2_VAULT", + "device_user_env": "IAG2_DEVICE_USER", + "device_pass_env": "IAG2_DEVICE_PASS", + "secret_group": "testGroup2", + }, +] + +responses = { + "iag1": { + "hostname": "https://iag1.example.com:8443", + "responses": { + "login": {"token": "abc123="}, + "logout": "User was successfully logged out of session", + "poll": {"success": True, "hostname": "localhost", "serverId": "00:00:00:00:00:00:8443"}, + "get_devices": { + "meta": { + "count": 1, + "query_object": {"offset": None, "limit": None, "filter": None, "order": "ascending"}, + "total_count": 1, + }, + "data": [ + { + "name": "rtr1.example.net", + "variables": { + "ansible_host": "192.0.2.1", + "ansible_network_os": "cisco.ios.ios", + "ansible_connection": "ansible.netcommon.network_cli", + "ansible_port": 22, + }, + }, + { + "name": "rtr10.example.net", + "variables": { + "ansible_host": "192.0.2.1", + }, + }, + { + "name": "rtr12.example.net", + "variables": { + "ansible_host": "192.0.2.12", + }, + }, + ], + }, + "get_device": { + "name": "rtr1.example.net", + "variables": { + "ansible_host": "192.0.2.1", + "ansible_network_os": "cisco.ios.ios", + "ansible_connection": "ansible.netcommon.network_cli", + "ansible_port": 22, + }, + }, + "create_device": { + "name": "rtr11.example.net", + "variables": {"ansible_host": "192.0.2.11", "ansible_network_os": "cisco.iosxr.iosxr"}, + }, + "update_device": { + "name": "rtr10.example.net", + "variables": { + "ansible_host": "192.0.2.10", + "ansible_network_os": "cisco.ios.ios", + }, + }, + "delete_device": {"code": 200, "status": 200, "message": "deleted"}, + "get_groups": { + "meta": { + "count": 1, + "query_object": {"offset": None, "limit": None, "filter": None, "order": "ascending"}, + "total_count": 1, + }, + "data": [ + { + "name": "rtr1.example.net", + "variables": {"ansible_user": "testUser", "ansible_password": "testPass"}, + "devices": ["rtr1.example.net"], + 
"childGroups": [], + } + ], + }, + "get_group": { + "name": "all", + "variables": {"ansible_user": "testUser", "ansible_password": "testPass"}, + "devices": ["rtr1.example.net"], + "childGroups": [], + }, + "create_group": { + "name": "test-group", + "variables": {}, + "devices": [], + "childGroups": [], + }, + "update_group": { + "name": "test-group", + "variables": {"key": "value"}, + "devices": [], + "childGroups": [], + }, + "delete_group": {"code": 200, "status": 200, "message": "deleted"}, + "add_device_to_group": ["rtr1.example.net"], + "delete_device_from_group": {"code": 200, "status": 200, "message": "deleted"}, + }, + }, +} + + +def update_or_create_automation_gateways( + name: str, + description: str, + location: str, + region: str, + gateway: str, + enabled: bool, + username_env: str, + password_env: str, + ansible_vault_env: str, + device_user_env: str, + device_pass_env: str, + secret_group: str, +): # pylint: disable=too-many-arguments,too-many-locals + """Fixture to populate Automation Gateways.""" + # Fetch the active status + status, _ = Status.objects.get_or_create(name="Active") + + # Create a region location type + location_type, _ = LocationType.objects.update_or_create(name="Region") + + # Create a region location + location, _ = Location.objects.update_or_create(name=region, location_type=location_type, status=status) + + # Create a REST username secret + secret_username, _ = Secret.objects.update_or_create( + name=username_env, provider="environment-variable", parameters={"variable": username_env} + ) + + # Create a REST password secret + secret_password, _ = Secret.objects.update_or_create( + name=password_env, provider="environment-variable", parameters={"variable": password_env} + ) + + # Create Ansible VAULT secret + ansible_vault, _ = Secret.objects.update_or_create( + name=ansible_vault_env, provider="environment-variable", parameters={"variable": ansible_vault_env} + ) + + # Create Device user secret + device_user, _ = 
Secret.objects.update_or_create( + name=device_user_env, provider="environment-variable", parameters={"variable": device_user_env} + ) + + # Create Device pass secret + device_pass, _ = Secret.objects.update_or_create( + name=device_pass_env, provider="environment-variable", parameters={"variable": device_pass_env} + ) + + # Create a secrets group + secret_group, _ = SecretsGroup.objects.update_or_create(name=secret_group) + + # Associate the REST username with the secrets group + SecretsGroupAssociation.objects.update_or_create( + secrets_group=secret_group, + secret=secret_username, + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME, + ) + + # Associate the REST password with the secrets group + SecretsGroupAssociation.objects.update_or_create( + secrets_group=secret_group, + secret=secret_password, + access_type=SecretsGroupAccessTypeChoices.TYPE_REST, + secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD, + ) + + # Associate the Ansible Vault secret with the secrets group + SecretsGroupAssociation.objects.update_or_create( + secrets_group=secret_group, + secret=ansible_vault, + access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC, + secret_type=SecretsGroupSecretTypeChoices.TYPE_KEY, + ) + + # Associate the Device username with the secrets group + SecretsGroupAssociation.objects.update_or_create( + secrets_group=secret_group, + secret=device_user, + access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC, + secret_type=SecretsGroupSecretTypeChoices.TYPE_USERNAME, + ) + + # Associate the Device password with the secrets group + SecretsGroupAssociation.objects.update_or_create( + secrets_group=secret_group, + secret=device_pass, + access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC, + secret_type=SecretsGroupSecretTypeChoices.TYPE_PASSWORD, + ) + + # Create the external integration + gateway, _ = ExternalIntegration.objects.update_or_create(name=name, remote_url=gateway, 
secrets_group=secret_group) + + # Create the Automation Gateway object + AutomationGatewayModel.objects.update_or_create( + name=name, description=description, location=location, gateway=gateway, enabled=enabled + ) diff --git a/nautobot_ssot/tests/itential/fixtures/urls.py b/nautobot_ssot/tests/itential/fixtures/urls.py new file mode 100644 index 000000000..968d51a1b --- /dev/null +++ b/nautobot_ssot/tests/itential/fixtures/urls.py @@ -0,0 +1,87 @@ +"""Itential SSoT URL fixtures.""" + +from nautobot_ssot.tests.itential.fixtures import gateways + + +data = [ + { + "method": "POST", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/login", + "json": gateways.responses["iag1"]["responses"].get("login"), + }, + { + "method": "POST", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/logout", + "json": gateways.responses["iag1"]["responses"].get("logout"), + }, + { + "method": "GET", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/poll", + "json": gateways.responses["iag1"]["responses"].get("poll"), + }, + { + "method": "GET", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/devices", + "json": gateways.responses["iag1"]["responses"].get("get_devices"), + }, + { + "method": "GET", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/devices/rtr1.example.net", + "json": gateways.responses["iag1"]["responses"].get("get_device"), + }, + { + "method": "POST", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/devices", + "json": gateways.responses["iag1"]["responses"].get("create_device"), + }, + { + "method": "PUT", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/devices/rtr10.example.net", + "json": gateways.responses["iag1"]["responses"].get("update_device"), + }, + { + "method": "DELETE", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/devices/rtr10.example.net", + "json": gateways.responses["iag1"]["responses"].get("delete_device"), + }, + { + 
"method": "DELETE", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/devices/rtr12.example.net", + "json": gateways.responses["iag1"]["responses"].get("delete_device"), + }, + { + "method": "GET", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/groups", + "json": gateways.responses["iag1"]["responses"].get("get_groups"), + }, + { + "method": "GET", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/groups/all", + "json": gateways.responses["iag1"]["responses"].get("get_group"), + }, + { + "method": "POST", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/groups", + "json": gateways.responses["iag1"]["responses"].get("create_group"), + }, + { + "method": "PUT", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/groups/test-group", + "json": gateways.responses["iag1"]["responses"].get("update_group"), + }, + { + "method": "DELETE", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/groups/test-group", + "json": gateways.responses["iag1"]["responses"].get("delete_group"), + }, + { + "method": "POST", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/groups/all/devices", + "json": gateways.responses["iag1"]["responses"].get("add_device_to_group"), + }, + { + "method": "DELETE", + "url": f"{gateways.responses['iag1'].get('hostname')}/api/v2.0/groups/all/devices/rtr12.example.net", + "json": gateways.responses["iag1"]["responses"].get("delete_device_from_group"), + }, +] diff --git a/nautobot_ssot/tests/itential/test_clients.py b/nautobot_ssot/tests/itential/test_clients.py new file mode 100644 index 000000000..e496b7036 --- /dev/null +++ b/nautobot_ssot/tests/itential/test_clients.py @@ -0,0 +1,68 @@ +"""Itential SSoT API Client Tests.""" + +from nautobot_ssot.tests.itential.fixtures.base import ItentialSSoTBaseTestCase +from nautobot_ssot.tests.itential.fixtures import gateways + + +class AutomationGatewayClientTestCase(ItentialSSoTBaseTestCase): + """Itential 
Automation Gateway Client Test Cases.""" + + def test_login_success(self): + """Test API client login.""" + response = self.client.login() + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("login")) + + def test_get_devices_success(self): + """Test get_devices.""" + response = self.client.get_devices() + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("get_devices")) + + def test_get_device_success(self): + """Test get_device.""" + response = self.client.get_device(device_name="rtr1.example.net") + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("get_device")) + + def test_create_device_success(self): + """Test create_device.""" + response = self.client.create_device(device_name="rtr10.example.net", variables={}) + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("create_device")) + + def test_update_device_success(self): + """Test update_device.""" + response = self.client.update_device(device_name="rtr10.example.net", variables={}) + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("update_device")) + + def test_delete_device_success(self): + """Test delete_device.""" + response = self.client.delete_device(device_name="rtr10.example.net") + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("delete_device")) + + def test_get_groups_success(self): + """Test get_groups.""" + response = self.client.get_groups() + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("get_groups")) + + def test_get_group_success(self): + """Test get_group.""" + response = self.client.get_group(group_name="all") + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("get_group")) + + def test_create_group_success(self): + """Test create_group.""" + response = self.client.create_group(group_name="test-group", variables={}) + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("create_group")) + + def 
test_update_group_success(self): + """Test update_group.""" + response = self.client.update_group(group_name="test-group", variables={}) + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("update_group")) + + def test_delete_group_success(self): + """Test delete_group.""" + response = self.client.delete_group(group_name="test-group") + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("delete_group")) + + def test_logout_success(self): + """Test API client logout.""" + response = self.client.logout() + self.assertEqual(response, gateways.responses["iag1"]["responses"].get("logout")) diff --git a/nautobot_ssot/tests/itential/test_diffsync.py b/nautobot_ssot/tests/itential/test_diffsync.py new file mode 100644 index 000000000..b80ad33c7 --- /dev/null +++ b/nautobot_ssot/tests/itential/test_diffsync.py @@ -0,0 +1,18 @@ +"""Itential SSoT DiffSync tests.""" + +from nautobot_ssot.tests.itential.fixtures import base + + +class DiffSyncTestCases(base.ItentialSSoTBaseTestCase): + """DiffSync test cases.""" + + def test_diff_success(self): + """Test diff exists.""" + diff = self.nautobot_adapter.diff_to(self.itential_adapter) + self.assertTrue(diff.has_diffs()) + + def test_sync_success(self): + """Test successful sync.""" + self.nautobot_adapter.sync_to(self.itential_adapter) + diff = self.nautobot_adapter.diff_to(self.itential_adapter) + self.assertFalse(diff.has_diffs()) diff --git a/nautobot_ssot/tests/itential/test_jobs.py b/nautobot_ssot/tests/itential/test_jobs.py new file mode 100644 index 000000000..9ede55df1 --- /dev/null +++ b/nautobot_ssot/tests/itential/test_jobs.py @@ -0,0 +1,54 @@ +"""Itential SSoT Jobs Test Cases.""" + +from django.test import override_settings +from nautobot.extras.models import Job, JobLogEntry +from nautobot.apps.testing import run_job_for_testing + +from nautobot_ssot.tests.itential.fixtures import base + +from nautobot_ssot.integrations.itential.models import AutomationGatewayModel + + 
+@override_settings( + PLUGINS_CONFIG={ + "nautobot_ssot": { + "enable_itential": True, + } + } +) +class ItentialSSoTJobsTestCase(base.ItentialSSoTBaseTransactionTestCase): + """Itential SSoT Jobs Test Cases.""" + + databases = ("default", "job_logs") + + def test_job_success(self): + """Test successful job.""" + self.job = Job.objects.get( + job_class_name="ItentialAutomationGatewayDataTarget", + module_name="nautobot_ssot.integrations.itential.jobs", + ) + job_result = run_job_for_testing( + self.job, dryrun=False, memory_profiling=False, gateway=self.gateway.pk, status=self.status.pk + ) + log_entries = JobLogEntry.objects.filter(job_result=job_result) + self.assertGreater(log_entries.count(), 1) + log_entries = [log_entry.message for log_entry in log_entries] + summary_output = "{'create': 2, 'update': 1, 'delete': 1, 'no-change': 1, 'skip': 0}" + self.assertIn(summary_output, log_entries) + self.assertIn("Sync complete", log_entries) + + def test_job_disabled_gateway(self): + """Test job with disabled automation gateway.""" + gateway = AutomationGatewayModel.objects.get(name="IAG10") + self.job = Job.objects.get( + job_class_name="ItentialAutomationGatewayDataTarget", + module_name="nautobot_ssot.integrations.itential.jobs", + ) + job_result = run_job_for_testing( + self.job, dryrun=False, memory_profiling=False, gateway=gateway.pk, status=self.status.pk + ) + log_entries = JobLogEntry.objects.filter(job_result=job_result) + self.assertGreater(log_entries.count(), 1) + log_entries = [log_entry.message for log_entry in log_entries] + summary_output = f"{gateway.gateway.remote_url} is not enabled to sync inventory." 
+ self.assertIn(summary_output, log_entries) diff --git a/nautobot_ssot/urls.py b/nautobot_ssot/urls.py index f7f9025ce..ee40bc5b6 100644 --- a/nautobot_ssot/urls.py +++ b/nautobot_ssot/urls.py @@ -16,6 +16,7 @@ path("history//jobresult/", views.SyncJobResultView.as_view(), name="sync_jobresult"), path("history//logs/", views.SyncLogEntriesView.as_view(), name="sync_logentries"), path("logs/", views.SyncLogEntryListView.as_view(), name="synclogentry_list"), + path("config/", views.SSOTConfigView.as_view(), name="config"), ] diff --git a/nautobot_ssot/views.py b/nautobot_ssot/views.py index 5ad225fee..4924be50e 100644 --- a/nautobot_ssot/views.py +++ b/nautobot_ssot/views.py @@ -3,20 +3,22 @@ import pprint from django.http import Http404 -from django.shortcuts import get_object_or_404 - +from django.shortcuts import get_object_or_404, render +from django.views import View as DjangoView from django_tables2 import RequestConfig - -from nautobot.extras.models import Job as JobModel from nautobot.core.views.generic import BulkDeleteView, ObjectDeleteView, ObjectListView, ObjectView +from nautobot.core.views.mixins import ContentTypePermissionRequiredMixin from nautobot.core.views.paginator import EnhancedPaginator +from nautobot.extras.models import Job as JobModel + +from nautobot_ssot.integrations import utils from .filters import SyncFilterSet, SyncLogEntryFilterSet from .forms import SyncFilterForm, SyncLogEntryFilterForm -from .jobs.base import DataSource, DataTarget from .jobs import get_data_jobs +from .jobs.base import DataSource, DataTarget from .models import Sync, SyncLogEntry -from .tables import DashboardTable, SyncTable, SyncTableSingleSourceOrTarget, SyncLogEntryTable +from .tables import DashboardTable, SyncLogEntryTable, SyncTable, SyncTableSingleSourceOrTarget class DashboardView(ObjectListView): @@ -182,3 +184,16 @@ class SyncLogEntryListView(ObjectListView): table = SyncLogEntryTable action_buttons = [] template_name = 
"nautobot_ssot/synclogentry_list.html" + + +class SSOTConfigView(ContentTypePermissionRequiredMixin, DjangoView): + """View with the SSOT integration configs.""" + + def get_required_permission(self): + """Permissions required for the view.""" + return "nautobot_ssot.view_ssotconfig" + + def get(self, request): + """Return table with links to configuration pages for enabled integrations.""" + enabled_integrations = list(utils.each_enabled_integration()) + return render(request, "nautobot_ssot/ssot_configs.html", {"enabled_integrations": enabled_integrations}) diff --git a/poetry.lock b/poetry.lock index e0d827150..4ec9ce2f0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2093,6 +2093,17 @@ files = [ {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"}, {file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"}, {file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"}, + {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:055b71bbc37af5c3c5861afe789e15211d2d3d06ac51ee5a647adf4def19c0ea"}, + {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c075a547de32f265a5dd139ab2035900fef6653951628862e5cdce0d101af557"}, + {file = "ijson-3.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:457f8a5fc559478ac6b06b6d37ebacb4811f8c5156e997f0d87d708b0d8ab2ae"}, + {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9788f0c915351f41f0e69ec2618b81ebfcf9f13d9d67c6d404c7f5afda3e4afb"}, + {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa234ab7a6a33ed51494d9d2197fb96296f9217ecae57f5551a55589091e7853"}, + {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bdd0dc5da4f9dc6d12ab6e8e0c57d8b41d3c8f9ceed31a99dae7b2baf9ea769a"}, + {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c6beb80df19713e39e68dc5c337b5c76d36ccf69c30b79034634e5e4c14d6904"}, + {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a2973ce57afb142d96f35a14e9cfec08308ef178a2c76b8b5e1e98f3960438bf"}, + {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:105c314fd624e81ed20f925271ec506523b8dd236589ab6c0208b8707d652a0e"}, + {file = "ijson-3.2.3-cp312-cp312-win32.whl", hash = "sha256:ac44781de5e901ce8339352bb5594fcb3b94ced315a34dbe840b4cff3450e23b"}, + {file = "ijson-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:0567e8c833825b119e74e10a7c29761dc65fcd155f5d4cb10f9d3b8916ef9912"}, {file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"}, {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"}, {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"}, @@ -3696,6 +3707,7 @@ files = [ {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, @@ -3704,6 +3716,8 @@ files = [ {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, @@ -3766,6 +3780,17 @@ files = [ [package.extras] tests = ["pytest"] +[[package]] +name = "py" +version = "1.11.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + 
{file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, + {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, +] + [[package]] name = "pycares" version = "4.4.0" @@ -4292,6 +4317,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -4299,8 +4325,16 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -4317,6 +4351,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -4324,6 +4359,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -4553,6 
+4589,21 @@ urllib3 = ">=1.25.10" [package.extras] tests = ["coverage (>=3.7.1,<6.0.0)", "flake8", "mypy", "pytest (>=4.6)", "pytest (>=4.6,<5.0)", "pytest-cov", "pytest-localserver", "types-mock", "types-requests", "types-six"] +[[package]] +name = "retry" +version = "0.9.2" +description = "Easy to use retry decorator." +optional = false +python-versions = "*" +files = [ + {file = "retry-0.9.2-py2.py3-none-any.whl", hash = "sha256:ccddf89761fa2c726ab29391837d4327f819ea14d244c232a1d24c67a2f98606"}, + {file = "retry-0.9.2.tar.gz", hash = "sha256:f8bfa8b99b69c4506d6f5bd3b0aabf77f98cdb17f3c9fc3f5ca820033336fba4"}, +] + +[package.dependencies] +decorator = ">=3.4.2" +py = ">=1.4.26,<2.0.0" + [[package]] name = "rfc3986" version = "1.5.0" @@ -5402,4 +5453,4 @@ servicenow = ["Jinja2", "PyYAML", "ijson", "oauthlib", "python-magic", "pytz", " [metadata] lock-version = "2.0" python-versions = ">=3.8,<3.12" -content-hash = "cd710f5c319a47e01c925b15ded4986b7dd40575ee65813234016f2511ffbbc6" +content-hash = "23849f65deb66d9d73f56dfeb1d7d4266f4f3c7cc9dda8f8b6b6bfe67e51755c" diff --git a/pyproject.toml b/pyproject.toml index 9eedc577f..a60e824be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "nautobot-ssot" -version = "2.6.1" +version = "2.7.0" description = "Nautobot Single Source of Truth" authors = ["Network to Code, LLC "] license = "Apache-2.0" @@ -51,6 +51,8 @@ requests = { version = ">=2.21.0", optional = true } requests-oauthlib = { version = ">=1.3.0", optional = true } six = { version = ">=1.13.0", optional = true } httpx = { version = ">=0.23.3", optional = true } +# Used by the Itential SSoT as a retry mechanism for HTTP failures in the AutomationGatewayClient. +retry = "^0.9.2" [tool.poetry.group.dev.dependencies] bandit = "*"