Skip to content
This repository was archived by the owner on Apr 7, 2022. It is now read-only.

Commit 4bd536d

Browse files
authored
Merge pull request #10218 from tpapaioa/fix_test_vm_retirement_from_global_region
[RFR] Fix navigation for temp appliance tests
2 parents 2e7a4cc + 825080d commit 4bd536d

File tree

3 files changed

+66
-46
lines changed

3 files changed

+66
-46
lines changed

cfme/fixtures/cli.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -279,6 +279,9 @@ def distributed_appliances(temp_appliance_preconfig_funcscope_rhevm,
279279
secondary_appliance.configure(region=0, key_address=primary_appliance.hostname,
280280
db_address=primary_appliance.hostname)
281281

282+
primary_appliance.browser_steal = True
283+
secondary_appliance.browser_steal = True
284+
282285
return primary_appliance, secondary_appliance
283286

284287

@@ -299,6 +302,9 @@ def replicated_appliances(temp_appliance_preconfig_funcscope_rhevm,
299302
global_appliance.add_pglogical_replication_subscription(remote_appliance.hostname)
300303
logger.info("Finished appliance replication configuration.")
301304

305+
remote_appliance.browser_steal = True
306+
global_appliance.browser_steal = True
307+
302308
return remote_appliance, global_appliance
303309

304310

@@ -318,6 +324,9 @@ def replicated_appliances_preupdate(multiple_preupdate_appliances):
318324
global_appliance.add_pglogical_replication_subscription(remote_appliance.hostname)
319325
logger.info("Finished appliance replication configuration.")
320326

327+
global_appliance.browser_steal = True
328+
remote_appliance.browser_steal = True
329+
321330
return remote_appliance, global_appliance
322331

323332

cfme/tests/cloud_infra_common/test_retirement.py

Lines changed: 21 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
from cfme.services.requests import RequestsView
1515
from cfme.utils.appliance.implementations.ui import navigate_to
1616
from cfme.utils.appliance.implementations.ui import navigator
17+
from cfme.utils.blockers import BZ
1718
from cfme.utils.log import logger
1819
from cfme.utils.providers import ProviderFilter
1920
from cfme.utils.wait import wait_for
@@ -434,11 +435,8 @@ def test_resume_retired_instance(create_vm, provider, remove_date):
434435
@pytest.mark.long_running
435436
@test_requirements.multi_region
436437
@test_requirements.retirement
437-
def test_vm_retirement_from_global_region(setup_multi_region_cluster,
438-
multi_region_cluster,
439-
activate_global_appliance,
440-
setup_remote_provider,
441-
create_vm):
438+
@pytest.mark.meta(blockers=[BZ(1839770)])
439+
def test_vm_retirement_from_global_region(replicated_appliances, create_vm):
442440
"""
443441
Retire a VM via Centralized Administration
444442
@@ -456,15 +454,29 @@ def test_vm_retirement_from_global_region(setup_multi_region_cluster,
456454
2. VM transitions to Retired state in the Global and Remote region.
457455
458456
"""
457+
remote_appliance, global_appliance = replicated_appliances
458+
459459
expected_date = {}
460460
expected_date['start'] = datetime.utcnow() + timedelta(minutes=-5)
461461

462-
create_vm.retire()
462+
provider = create_vm.provider
463463

464-
verify_retirement_state(create_vm)
464+
# Instantiate on each appliance so that browser uses the correct appliance.
465+
vm_per_appliance = {
466+
a: a.provider_based_collection(provider).instantiate(create_vm.name, provider)
467+
for a in replicated_appliances
468+
}
465469

466-
expected_date['end'] = datetime.utcnow() + timedelta(minutes=5)
467-
verify_retirement_date(create_vm, expected_date=expected_date)
470+
with remote_appliance:
471+
provider.create()
472+
473+
with global_appliance:
474+
vm_per_appliance[global_appliance].retire()
475+
476+
with remote_appliance:
477+
verify_retirement_state(vm_per_appliance[remote_appliance])
478+
expected_date['end'] = datetime.utcnow() + timedelta(minutes=5)
479+
verify_retirement_date(vm_per_appliance[remote_appliance], expected_date=expected_date)
468480

469481

470482
@pytest.mark.manual

cfme/tests/distributed/test_appliance_replication.py

Lines changed: 36 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -40,12 +40,10 @@ def test_appliance_replicate_between_regions(provider, replicated_appliances):
4040
"""
4141
remote_appliance, global_appliance = replicated_appliances
4242

43-
remote_appliance.browser_steal = True
4443
with remote_appliance:
4544
provider.create()
4645
remote_appliance.collections.infra_providers.wait_for_a_provider()
4746

48-
global_appliance.browser_steal = True
4947
with global_appliance:
5048
global_appliance.collections.infra_providers.wait_for_a_provider()
5149
assert provider.exists
@@ -69,12 +67,10 @@ def test_external_database_appliance(provider, distributed_appliances):
6967
"""
7068
primary_appliance, secondary_appliance = distributed_appliances
7169

72-
primary_appliance.browser_steal = True
7370
with primary_appliance:
7471
provider.create()
7572
primary_appliance.collections.infra_providers.wait_for_a_provider()
7673

77-
secondary_appliance.browser_steal = True
7874
with secondary_appliance:
7975
secondary_appliance.collections.infra_providers.wait_for_a_provider()
8076
assert provider.exists
@@ -101,12 +97,10 @@ def test_appliance_replicate_database_disconnection(provider, replicated_applian
10197
sleep(60)
10298
global_appliance.db_service.start()
10399

104-
remote_appliance.browser_steal = True
105100
with remote_appliance:
106101
provider.create()
107102
remote_appliance.collections.infra_providers.wait_for_a_provider()
108103

109-
global_appliance.browser_steal = True
110104
with global_appliance:
111105
global_appliance.collections.infra_providers.wait_for_a_provider()
112106
assert provider.exists
@@ -129,15 +123,13 @@ def test_appliance_replicate_database_disconnection_with_backlog(provider, repli
129123
"""
130124
remote_appliance, global_appliance = replicated_appliances
131125

132-
remote_appliance.browser_steal = True
133126
with remote_appliance:
134127
provider.create()
135128
global_appliance.db_service.stop()
136129
sleep(60)
137130
global_appliance.db_service.start()
138131
remote_appliance.collections.infra_providers.wait_for_a_provider()
139132

140-
global_appliance.browser_steal = True
141133
with global_appliance:
142134
global_appliance.collections.infra_providers.wait_for_a_provider()
143135
assert provider.exists
@@ -163,16 +155,20 @@ def test_replication_vm_power_control(provider, create_vm, context, replicated_a
163155
"""
164156
remote_appliance, global_appliance = replicated_appliances
165157

166-
remote_appliance.browser_steal = True
158+
vm_per_appliance = {
159+
a: a.provider_based_collection(provider).instantiate(create_vm.name, provider)
160+
for a in replicated_appliances
161+
}
162+
167163
with remote_appliance:
168164
assert provider.create(validate_inventory=True), "Could not create provider."
169165

170-
global_appliance.browser_steal = True
171166
with global_appliance:
172-
create_vm.power_control_from_cfme(option=create_vm.POWER_OFF, cancel=False)
173-
navigate_to(create_vm.provider, 'Details')
174-
create_vm.wait_for_vm_state_change(desired_state=create_vm.STATE_OFF, timeout=900)
175-
assert create_vm.find_quadicon().data['state'] == 'off', "Incorrect VM quadicon state"
167+
vm = vm_per_appliance[global_appliance]
168+
vm.power_control_from_cfme(option=vm.POWER_OFF, cancel=False)
169+
# navigate_to(provider, 'Details')
170+
vm.wait_for_vm_state_change(desired_state=vm.STATE_OFF, timeout=900)
171+
assert vm.find_quadicon().data['state'] == 'off', "Incorrect VM quadicon state"
176172
assert not create_vm.mgmt.is_running, "VM is still running"
177173

178174

@@ -198,16 +194,17 @@ def test_replication_connect_to_vm_in_region(provider, replicated_appliances):
198194

199195
vm_name = provider.data['cap_and_util']['chargeback_vm']
200196

201-
remote_appliance.browser_steal = True
197+
vm_per_appliance = {
198+
a: a.provider_based_collection(provider).instantiate(vm_name, provider)
199+
for a in replicated_appliances
200+
}
201+
202202
with remote_appliance:
203203
provider.create()
204204
remote_appliance.collections.infra_providers.wait_for_a_provider()
205205

206-
global_appliance.browser_steal = True
207206
with global_appliance:
208-
collection = global_appliance.provider_based_collection(provider)
209-
vm = collection.instantiate(vm_name, provider)
210-
view = navigate_to(vm, 'Details')
207+
view = navigate_to(vm_per_appliance[global_appliance], 'Details')
211208

212209
initial_count = len(view.browser.window_handles)
213210
main_window = view.browser.current_window_handle
@@ -233,7 +230,9 @@ def test_replication_connect_to_vm_in_region(provider, replicated_appliances):
233230
'password': conf.credentials['default']['password']
234231
})
235232
view.login.click()
236-
view = vm.create_view(InfraVmDetailsView)
233+
234+
# Use VM instantiated on global_appliance here because we're still using the same browser.
235+
view = vm_per_appliance[global_appliance].create_view(InfraVmDetailsView)
237236
wait_for(lambda: view.is_displayed, message="Wait for VM Details page")
238237

239238

@@ -265,7 +264,6 @@ def test_appliance_httpd_roles(distributed_appliances):
265264
sid = secondary_appliance.server.sid
266265
secondary_server = primary_appliance.collections.servers.instantiate(sid=sid)
267266

268-
primary_appliance.browser_steal = True
269267
with primary_appliance:
270268
view = navigate_to(secondary_server, 'Server')
271269

@@ -348,7 +346,6 @@ def test_server_role_failover(distributed_appliances):
348346

349347
# Enable all roles on both appliances.
350348
for appliance in distributed_appliances:
351-
appliance.browser_steal = True
352349
with appliance:
353350
view = navigate_to(appliance.server, 'Server')
354351
view.server_roles.fill(fill_values)
@@ -393,9 +390,10 @@ def test_appliance_replicate_zones(replicated_appliances):
393390
global_zone = 'global-A'
394391
global_appliance.collections.zones.create(name=global_zone, description=global_zone)
395392

396-
view = navigate_to(global_appliance.server, 'Server')
397-
global_zones = [o.text for o in view.basic_information.appliance_zone.all_options]
398-
assert global_zone in global_zones and remote_zone not in global_zones
393+
with global_appliance:
394+
view = navigate_to(global_appliance.server, 'Server')
395+
global_zones = [o.text for o in view.basic_information.appliance_zone.all_options]
396+
assert global_zone in global_zones and remote_zone not in global_zones
399397

400398

401399
@pytest.mark.tier(2)
@@ -419,15 +417,16 @@ def test_appliance_replicate_remote_down(replicated_appliances):
419417
"""
420418
remote_appliance, global_appliance = replicated_appliances
421419

422-
global_region = global_appliance.server.zone.region
423-
assert global_region.replication.get_replication_status(host=remote_appliance.hostname), (
424-
"Remote appliance not found on Replication tab after initial configuration.")
425-
426-
result = global_appliance.ssh_client.run_command(
427-
f"firewall-cmd --direct --add-rule ipv4 filter OUTPUT 0 -d {remote_appliance.hostname}"
428-
" -j DROP")
429-
assert result.success, "Could not create firewall rule on global appliance."
430-
431-
global_appliance.browser.widgetastic.refresh()
432-
assert global_region.replication.get_replication_status(host=remote_appliance.hostname), (
433-
"Remote appliance not found on Replication tab after dropped connection.")
420+
with global_appliance:
421+
global_region = global_appliance.server.zone.region
422+
assert global_region.replication.get_replication_status(host=remote_appliance.hostname), (
423+
"Remote appliance not found on Replication tab after initial configuration.")
424+
425+
result = global_appliance.ssh_client.run_command(
426+
f"firewall-cmd --direct --add-rule ipv4 filter OUTPUT 0 -d {remote_appliance.hostname}"
427+
" -j DROP")
428+
assert result.success, "Could not create firewall rule on global appliance."
429+
430+
global_appliance.browser.widgetastic.refresh()
431+
assert global_region.replication.get_replication_status(host=remote_appliance.hostname), (
432+
"Remote appliance not found on Replication tab after dropped connection.")

0 commit comments

Comments (0)