|
29 | 29 | from aws_advanced_python_wrapper import AwsWrapperConnection |
30 | 30 | from aws_advanced_python_wrapper.errors import (FailoverSuccessError, |
31 | 31 | ReadWriteSplittingError) |
| 32 | +from aws_advanced_python_wrapper.hostinfo import HostRole |
32 | 33 | from aws_advanced_python_wrapper.utils.log import Logger |
33 | 34 | from aws_advanced_python_wrapper.utils.properties import (Properties, |
34 | 35 | WrapperProperties) |
@@ -195,76 +196,183 @@ def test_custom_endpoint_failover(self, test_driver: TestDriver, conn_utils, pro |
195 | 196 |
|
196 | 197 | conn.close() |
197 | 198 |
|
198 | | - def test_custom_endpoint_read_write_splitting__with_custom_endpoint_changes( |
| 199 | + def _setup_custom_endpoint_role(self, target_driver_connect, conn_kwargs, rds_utils, host_role: HostRole): |
| 200 | + self.logger.debug("Setting up custom endpoint instance with role: " + host_role.name) |
| 201 | + props = {'plugins': ''} |
| 202 | + original_writer = rds_utils.get_cluster_writer_instance_id() |
| 203 | + failover_target = None |
| 204 | + with AwsWrapperConnection.connect(target_driver_connect, **conn_kwargs, **props) as conn: |
| 205 | + endpoint_members = self.endpoint_info["StaticMembers"] |
| 206 | + original_instance_id = rds_utils.query_instance_id(conn) |
| 207 | + self.logger.debug("Original instance id: " + original_instance_id) |
| 208 | + assert original_instance_id in endpoint_members |
| 209 | + |
| 210 | + if host_role == HostRole.WRITER: |
| 211 | + if original_instance_id == original_writer: |
| 212 | + self.logger.debug("Role is already " + host_role.name + ", no failover needed.") |
| 213 | + return # Do nothing, no need to failover. |
| 214 | + failover_target = original_instance_id |
| 215 | + self.logger.debug("Failing over to get writer role...") |
| 216 | + elif host_role == HostRole.READER: |
| 217 | + if original_instance_id != original_writer: |
| 218 | + self.logger.debug("Role is already " + host_role.name + ", no failover needed.") |
| 219 | + return # Do nothing, no need to failover. |
| 220 | + self.logger.debug("Failing over to get reader role...") |
| 221 | + |
| 222 | + rds_utils.failover_cluster_and_wait_until_writer_changed(target_id=failover_target) |
| 223 | + |
| 224 | + self.logger.debug("Verifying that new connection has role: " + host_role.name) |
| 225 | + # Verify that new connection is now the correct role |
| 226 | + with AwsWrapperConnection.connect(target_driver_connect, **conn_kwargs, **props) as conn: |
| 227 | + endpoint_members = self.endpoint_info["StaticMembers"] |
| 228 | + original_instance_id = rds_utils.query_instance_id(conn) |
| 229 | + assert original_instance_id in endpoint_members |
| 230 | + |
| 231 | + new_role = rds_utils.query_host_role(conn, TestEnvironment.get_current().get_engine()) |
| 232 | + assert new_role == host_role |
| 233 | + self.logger.debug("Custom endpoint instance successfully set to role: " + host_role.name) |
| 234 | + |
| 235 | + def test_custom_endpoint_read_write_splitting__with_custom_endpoint_changes__with_reader_as_init_conn( |
199 | 236 | self, test_driver: TestDriver, conn_utils, props, rds_utils): |
| 237 | + ''' |
| 238 | + Will test for the following scenario: |
| 239 | + 1. Initially connect to a reader instance via the custom endpoint. |
| 240 | + 2. Attempt to switch to writer instance - should fail since the custom endpoint only has the reader instance. |
| 241 | + 3. Modify the custom endpoint to add the writer instance as a static member. |
| 242 | + 4. Switch to writer instance - should succeed. |
| 243 | + 5. Switch back to reader instance - should succeed. |
| 244 | + 6. Modify the custom endpoint to remove the writer instance as a static member. |
| 245 | + 7. Attempt to switch to writer instance - should fail since the custom endpoint no longer has the writer instance. |
| 246 | + ''' |
200 | 247 | target_driver_connect = DriverHelper.get_connect_func(test_driver) |
201 | 248 | kwargs = conn_utils.get_connect_params() |
202 | 249 | kwargs["host"] = self.endpoint_info["Endpoint"] |
203 | 250 | # This setting is not required for the test, but it allows us to also test re-creation of expired monitors since |
204 | 251 | # it takes more than 30 seconds to modify the cluster endpoint (usually around 140s). |
205 | 252 | props["custom_endpoint_idle_monitor_expiration_ms"] = 30_000 |
206 | 253 | props["wait_for_custom_endpoint_info_timeout_ms"] = 30_000 |
207 | | - conn = AwsWrapperConnection.connect(target_driver_connect, **kwargs, **props) |
208 | 254 |
|
| 255 | + # Ensure that we are starting with a reader connection |
| 256 | + self._setup_custom_endpoint_role(target_driver_connect, kwargs, rds_utils, HostRole.READER) |
| 257 | + |
| 258 | + conn = AwsWrapperConnection.connect(target_driver_connect, **kwargs, **props) |
209 | 259 | endpoint_members = self.endpoint_info["StaticMembers"] |
210 | | - original_instance_id = rds_utils.query_instance_id(conn) |
211 | | - assert original_instance_id in endpoint_members |
| 260 | + original_reader_id = rds_utils.query_instance_id(conn) |
| 261 | + assert original_reader_id in endpoint_members |
212 | 262 |
|
213 | 263 | # Attempt to switch to an instance of the opposite role. This should fail since the custom endpoint consists |
214 | 264 | # only of the current host. |
215 | | - new_read_only_value = original_instance_id == rds_utils.get_cluster_writer_instance_id() |
216 | | - if new_read_only_value: |
217 | | - # We are connected to the writer. Attempting to switch to the reader will not work but will intentionally |
218 | | - # not throw an exception. In this scenario we log a warning and purposefully stick with the writer. |
219 | | - self.logger.debug("Initial connection is to the writer. Attempting to switch to reader...") |
220 | | - conn.read_only = new_read_only_value |
221 | | - new_instance_id = rds_utils.query_instance_id(conn) |
222 | | - assert new_instance_id == original_instance_id |
223 | | - else: |
224 | | - # We are connected to the reader. Attempting to switch to the writer will throw an exception. |
225 | | - self.logger.debug("Initial connection is to a reader. Attempting to switch to writer...") |
226 | | - with pytest.raises(ReadWriteSplittingError): |
227 | | - conn.read_only = new_read_only_value |
| 265 | + self.logger.debug("Initial connection is to a reader. Attempting to switch to writer...") |
| 266 | + with pytest.raises(ReadWriteSplittingError): |
| 267 | + conn.read_only = False |
228 | 268 |
|
229 | | - instances = TestEnvironment.get_current().get_instances() |
230 | 269 | writer_id = rds_utils.get_cluster_writer_instance_id() |
231 | | - if original_instance_id == writer_id: |
232 | | - new_member = instances[1].get_instance_id() |
233 | | - else: |
234 | | - new_member = writer_id |
235 | 270 |
|
236 | 271 | rds_client = client('rds', region_name=TestEnvironment.get_current().get_aurora_region()) |
237 | 272 | rds_client.modify_db_cluster_endpoint( |
238 | 273 | DBClusterEndpointIdentifier=self.endpoint_id, |
239 | | - StaticMembers=[original_instance_id, new_member] |
| 274 | + StaticMembers=[original_reader_id, writer_id] |
240 | 275 | ) |
241 | 276 |
|
242 | 277 | try: |
243 | | - self.wait_until_endpoint_has_members(rds_client, {original_instance_id, new_member}) |
| 278 | + self.wait_until_endpoint_has_members(rds_client, {original_reader_id, writer_id}) |
244 | 279 |
|
245 | | - # We should now be able to switch to new_member. |
246 | | - conn.read_only = new_read_only_value |
| 280 | + # We should now be able to switch to writer. |
| 281 | + conn.read_only = False |
247 | 282 | new_instance_id = rds_utils.query_instance_id(conn) |
248 | | - assert new_instance_id == new_member |
| 283 | + assert new_instance_id == writer_id |
249 | 284 |
|
250 | 285 | # Switch back to original instance |
251 | | - conn.read_only = not new_read_only_value |
| 286 | + conn.read_only = True |
| 287 | + new_instance_id = rds_utils.query_instance_id(conn) |
| 288 | + assert new_instance_id == original_reader_id |
252 | 289 | finally: |
| 290 | + # Remove the writer from the custom endpoint. |
253 | 291 | rds_client.modify_db_cluster_endpoint( |
254 | 292 | DBClusterEndpointIdentifier=self.endpoint_id, |
255 | | - StaticMembers=[original_instance_id]) |
256 | | - self.wait_until_endpoint_has_members(rds_client, {original_instance_id}) |
| 293 | + StaticMembers=[original_reader_id]) |
| 294 | + self.wait_until_endpoint_has_members(rds_client, {original_reader_id}) |
257 | 295 |
|
258 | 296 | # We should not be able to switch again because the writer was removed from the custom endpoint. |
259 | | - if new_read_only_value: |
260 | | - # We are connected to the writer. Attempting to switch to the reader will not work but will intentionally |
261 | | - # not throw an exception. In this scenario we log a warning and purposefully stick with the writer. |
262 | | - conn.read_only = new_read_only_value |
| 297 | + # We are connected to the reader. Attempting to switch to the writer will throw an exception. |
| 298 | + with pytest.raises(ReadWriteSplittingError): |
| 299 | + conn.read_only = False |
| 300 | + |
| 301 | + conn.close() |
| 302 | + |
| 303 | + def test_custom_endpoint_read_write_splitting__with_custom_endpoint_changes__with_writer_as_init_conn( |
| 304 | + self, test_driver: TestDriver, conn_utils, props, rds_utils): |
| 305 | + ''' |
| 306 | + Will test for the following scenario: |
| 307 | + 1. Initially connect to the writer instance via the custom endpoint. |
| 308 | + 2. Attempt to switch to reader instance - should succeed, but will still use writer instance as reader. |
| 309 | + 3. Modify the custom endpoint to add a reader instance as a static member. |
| 310 | + 4. Switch to reader instance - should succeed. |
| 311 | + 5. Switch back to writer instance - should succeed. |
| 312 | + 6. Modify the custom endpoint to remove the reader instance as a static member. |
| 313 | + 7. Attempt to switch to reader instance - should fail since the custom endpoint no longer has the reader instance. |
| 314 | + ''' |
| 315 | + |
| 316 | + target_driver_connect = DriverHelper.get_connect_func(test_driver) |
| 317 | + kwargs = conn_utils.get_connect_params() |
| 318 | + kwargs["host"] = self.endpoint_info["Endpoint"] |
| 319 | + # This setting is not required for the test, but it allows us to also test re-creation of expired monitors since |
| 320 | + # it takes more than 30 seconds to modify the cluster endpoint (usually around 140s). |
| 321 | + props["custom_endpoint_idle_monitor_expiration_ms"] = 30_000 |
| 322 | + props["wait_for_custom_endpoint_info_timeout_ms"] = 30_000 |
| 323 | + |
| 324 | + # Ensure that we are starting with a writer connection |
| 325 | + self._setup_custom_endpoint_role(target_driver_connect, kwargs, rds_utils, HostRole.WRITER) |
| 326 | + conn = AwsWrapperConnection.connect(target_driver_connect, **kwargs, **props) |
| 327 | + |
| 328 | + endpoint_members = self.endpoint_info["StaticMembers"] |
| 329 | + original_writer_id = str(rds_utils.query_instance_id(conn)) |
| 330 | + assert original_writer_id in endpoint_members |
| 331 | + |
| 332 | + # We are connected to the writer. Attempting to switch to the reader will not work but will intentionally |
| 333 | + # not throw an exception. In this scenario we log a warning and purposefully stick with the writer. |
| 334 | + self.logger.debug("Initial connection is to the writer. Attempting to switch to reader...") |
| 335 | + conn.read_only = True |
| 336 | + new_instance_id = rds_utils.query_instance_id(conn) |
| 337 | + assert new_instance_id == original_writer_id |
| 338 | + |
| 339 | + instances = TestEnvironment.get_current().get_instances() |
| 340 | + writer_id = str(rds_utils.get_cluster_writer_instance_id()) |
| 341 | + |
| 342 | + reader_id_to_add = "" |
| 343 | + # Get any reader id |
| 344 | + for instance in instances: |
| 345 | + if instance.get_instance_id() != writer_id: |
| 346 | + reader_id_to_add = instance.get_instance_id() |
| 347 | + break |
| 348 | + |
| 349 | + rds_client = client('rds', region_name=TestEnvironment.get_current().get_aurora_region()) |
| 350 | + rds_client.modify_db_cluster_endpoint( |
| 351 | + DBClusterEndpointIdentifier=self.endpoint_id, |
| 352 | + StaticMembers=[original_writer_id, reader_id_to_add] |
| 353 | + ) |
| 354 | + |
| 355 | + try: |
| 356 | + self.wait_until_endpoint_has_members(rds_client, {original_writer_id, reader_id_to_add}) |
| 357 | + # We should now be able to switch to the reader. |
| 358 | + conn.read_only = True |
263 | 359 | new_instance_id = rds_utils.query_instance_id(conn) |
264 | | - assert new_instance_id == original_instance_id |
265 | | - else: |
266 | | - # We are connected to the reader. Attempting to switch to the writer will throw an exception. |
267 | | - with pytest.raises(ReadWriteSplittingError): |
268 | | - conn.read_only = new_read_only_value |
| 360 | + assert new_instance_id == reader_id_to_add |
| 361 | + |
| 362 | + # Switch back to original instance |
| 363 | + conn.read_only = False |
| 364 | + finally: |
| 365 | + # Remove the reader from the custom endpoint. |
| 366 | + rds_client.modify_db_cluster_endpoint( |
| 367 | + DBClusterEndpointIdentifier=self.endpoint_id, |
| 368 | + StaticMembers=[original_writer_id]) |
| 369 | + self.wait_until_endpoint_has_members(rds_client, {original_writer_id}) |
| 370 | + |
| 371 | + # We should not be able to switch again because the reader was removed from the custom endpoint. |
| 372 | + # We are connected to the writer. Attempting to switch to the reader will not work but will intentionally |
| 373 | + # not throw an exception. In this scenario we log a warning and fall back to the writer. |
| 374 | + conn.read_only = True |
| 375 | + new_instance_id = rds_utils.query_instance_id(conn) |
| 376 | + assert new_instance_id == original_writer_id |
269 | 377 |
|
270 | 378 | conn.close() |
0 commit comments