Loaded libgmt:
  binary dir: C:/Users/seisman/Anaconda3/envs/pygmt
  cores: 1
  grid layout: rows
  library path: C:/programs/gmt6/bin/gmt_w64.dll
  padding: 2
  plugin dir: C:/programs/gmt6/bin/gmt_plugins
  share dir: c:/programs/gmt5/share
  version: 6.0.0
============================= test session starts =============================
platform win32 -- Python 3.6.6, pytest-4.1.1, py-1.7.0, pluggy-0.8.1 -- C:\Users\seisman\Anaconda3\envs\pygmt\python.exe
cachedir: .pytest_cache
Matplotlib: 3.0.2
Freetype: 2.9.1
rootdir: C:\Users\seisman\Desktop, inifile:
plugins: mpl-0.10
collecting ... collected 158 items

base_plotting.py::pygmt.base_plotting.BasePlotting._preprocess <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\base_plotting.py PASSED [  0%]
figure.py::pygmt.figure.Figure <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\figure.py PASSED [  1%]
clib/conversion.py::pygmt.clib.conversion._as_array <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\conversion.py FAILED [  1%]
clib/conversion.py::pygmt.clib.conversion.as_c_contiguous <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\conversion.py PASSED [  2%]
clib/conversion.py::pygmt.clib.conversion.dataarray_to_matrix <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\conversion.py PASSED [  3%]
clib/conversion.py::pygmt.clib.conversion.kwargs_to_ctypes_array <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\conversion.py FAILED [  3%]
clib/conversion.py::pygmt.clib.conversion.vectors_to_arrays <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\conversion.py PASSED [  4%]
clib/session.py::pygmt.clib.session.Session <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\session.py PASSED [  5%]
clib/session.py::pygmt.clib.session.Session._check_dtype_and_dim <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\session.py PASSED [  5%]
clib/session.py::pygmt.clib.session.Session.extract_region <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\session.py PASSED [  6%]
clib/session.py::pygmt.clib.session.Session.get_libgmt_func <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\session.py PASSED [  6%]
clib/session.py::pygmt.clib.session.Session.open_virtual_file <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\session.py PASSED [  7%]
clib/session.py::pygmt.clib.session.Session.virtualfile_from_grid <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\session.py PASSED [  8%]
clib/session.py::pygmt.clib.session.Session.virtualfile_from_matrix <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\session.py PASSED [  8%]
clib/session.py::pygmt.clib.session.Session.virtualfile_from_vectors <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\session.py PASSED [  9%]
datasets/earth_relief.py::pygmt.datasets.earth_relief._is_valid_resolution <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\datasets\earth_relief.py PASSED [ 10%]
datasets/earth_relief.py::pygmt.datasets.earth_relief._shape_from_resolution <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\datasets\earth_relief.py PASSED [ 10%]
helpers/decorators.py::pygmt.helpers.decorators.fmt_docstring <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\helpers\decorators.py PASSED [ 11%]
helpers/decorators.py::pygmt.helpers.decorators.kwargs_to_strings <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\helpers\decorators.py PASSED [ 12%]
helpers/decorators.py::pygmt.helpers.decorators.use_alias <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\helpers\decorators.py PASSED [ 12%]
helpers/tempfile.py::pygmt.helpers.tempfile.GMTTempFile <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\helpers\tempfile.py PASSED [ 13%]
helpers/utils.py::pygmt.helpers.utils.build_arg_string <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\helpers\utils.py PASSED [ 13%]
helpers/utils.py::pygmt.helpers.utils.data_kind <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\helpers\utils.py PASSED [ 14%]
helpers/utils.py::pygmt.helpers.utils.dummy_context <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\helpers\utils.py PASSED [ 15%]
helpers/utils.py::pygmt.helpers.utils.is_nonstr_iter <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\helpers\utils.py PASSED [ 15%]
tests/test_basemap.py::test_basemap_required_args <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_basemap.py PASSED [ 16%]
tests/test_basemap.py::test_basemap <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_basemap.py PASSED [ 17%]
tests/test_basemap.py::test_basemap_list_region <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_basemap.py PASSED [ 17%]
tests/test_basemap.py::test_basemap_loglog <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_basemap.py PASSED [ 18%]
tests/test_basemap.py::test_basemap_power_axis <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_basemap.py PASSED [ 18%]
tests/test_basemap.py::test_basemap_polar <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_basemap.py FAILED [ 19%]
tests/test_basemap.py::test_basemap_winkel_tripel <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_basemap.py FAILED [ 20%]
tests/test_basemap.py::test_basemap_aliases <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_basemap.py FAILED [ 20%]
tests/test_clib.py::test_load_libgmt <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 21%]
tests/test_clib.py::test_load_libgmt_fail <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 22%]
tests/test_clib.py::test_get_clib_path <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 22%]
tests/test_clib.py::test_check_libgmt <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 23%]
tests/test_clib.py::test_clib_name <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 24%]
tests/test_clib.py::test_getitem <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 24%]
tests/test_clib.py::test_create_destroy_session <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 25%]
tests/test_clib.py::test_create_session_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 25%]
tests/test_clib.py::test_destroy_session_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 26%]
tests/test_clib.py::test_call_module <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 27%]
tests/test_clib.py::test_call_module_invalid_arguments <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 27%]
tests/test_clib.py::test_call_module_invalid_name <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 28%]
tests/test_clib.py::test_call_module_error_message <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 29%]
tests/test_clib.py::test_method_no_session <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 29%]
tests/test_clib.py::test_parse_constant_single <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 30%]
tests/test_clib.py::test_parse_constant_composite <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 31%]
tests/test_clib.py::test_parse_constant_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 31%]
tests/test_clib.py::test_create_data_dataset <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 32%]
tests/test_clib.py::test_create_data_grid_dim <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 32%]
tests/test_clib.py::test_create_data_grid_range <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 33%]
tests/test_clib.py::test_create_data_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 34%]
tests/test_clib.py::test_put_vector <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 34%]
tests/test_clib.py::test_put_vector_invalid_dtype <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 35%]
tests/test_clib.py::test_put_vector_wrong_column <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 36%]
tests/test_clib.py::test_put_vector_2d_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 36%]
tests/test_clib.py::test_put_matrix <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 37%]
tests/test_clib.py::test_put_matrix_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 37%]
tests/test_clib.py::test_put_matrix_grid <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 38%]
tests/test_clib.py::test_virtual_file <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 39%]
tests/test_clib.py::test_virtual_file_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 39%]
tests/test_clib.py::test_virtual_file_bad_direction <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 40%]
tests/test_clib.py::test_virtualfile_from_vectors <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 41%]
tests/test_clib.py::test_virtualfile_from_vectors_transpose <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 41%]
tests/test_clib.py::test_virtualfile_from_vectors_diff_size <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 42%]
tests/test_clib.py::test_virtualfile_from_matrix <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 43%]
tests/test_clib.py::test_virtualfile_from_matrix_slice <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 43%]
tests/test_clib.py::test_virtualfile_from_vectors_pandas <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 44%]
tests/test_clib.py::test_virtualfile_from_vectors_arraylike <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 44%]
tests/test_clib.py::test_extract_region_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 45%]
tests/test_clib.py::test_extract_region_two_figures <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 46%]
tests/test_clib.py::test_write_data_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 46%]
tests/test_clib.py::test_dataarray_to_matrix_dims_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 47%]
tests/test_clib.py::test_dataarray_to_matrix_inc_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 48%]
tests/test_clib.py::test_get_default <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 48%]
tests/test_clib.py::test_get_default_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 49%]
tests/test_clib.py::test_info_dict <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 50%]
tests/test_clib.py::test_fails_for_wrong_version <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_clib.py PASSED [ 50%]
tests/test_coast.py::test_coast <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_coast.py FAILED [ 51%]
tests/test_coast.py::test_coast_iceland <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_coast.py FAILED [ 51%]
tests/test_coast.py::test_coast_aliases <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_coast.py FAILED [ 52%]
tests/test_coast.py::test_coast_world_mercator <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_coast.py FAILED [ 53%]
tests/test_contour.py::test_contour_fail_no_data <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_contour.py PASSED [ 53%]
tests/test_contour.py::test_contour_vec <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_contour.py PASSED [ 54%]
tests/test_contour.py::test_contour_matrix <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_contour.py PASSED [ 55%]
tests/test_contour.py::test_contour_from_file <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_contour.py PASSED [ 55%]
tests/test_datasets.py::test_japan_quakes <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_datasets.py PASSED [ 56%]
tests/test_datasets.py::test_sample_bathymetry <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_datasets.py PASSED [ 56%]
tests/test_datasets.py::test_usgs_quakes <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_datasets.py PASSED [ 57%]
tests/test_datasets.py::test_earth_relief_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_datasets.py PASSED [ 58%]
tests/test_datasets.py::test_earth_relief_60 <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_datasets.py PASSED [ 58%]
tests/test_datasets.py::test_earth_relief_30 <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_datasets.py PASSED [ 59%]
tests/test_figure.py::test_figure_region <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_figure.py PASSED [ 60%]
tests/test_figure.py::test_figure_region_multiple <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_figure.py PASSED [ 60%]
tests/test_figure.py::test_figure_region_country_codes <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_figure.py PASSED [ 61%]
tests/test_figure.py::test_figure_savefig_exists <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_figure.py PASSED [ 62%]
tests/test_figure.py::test_figure_savefig_transparent <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_figure.py PASSED [ 62%]
tests/test_figure.py::test_figure_savefig <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_figure.py PASSED [ 63%]
tests/test_figure.py::test_figure_show <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_figure.py PASSED [ 63%]
tests/test_grdcontour.py::test_grdcontour <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdcontour.py FAILED [ 64%]
tests/test_grdcontour.py::test_grdcontour_labels <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdcontour.py FAILED [ 65%]
tests/test_grdcontour.py::test_grdcontour_slice <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdcontour.py FAILED [ 65%]
tests/test_grdcontour.py::test_grdcontour_file <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdcontour.py PASSED [ 66%]
tests/test_grdcontour.py::test_grdcontour_interval_file_full_opts <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdcontour.py FAILED [ 67%]
tests/test_grdcontour.py::test_grdcontour_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdcontour.py PASSED [ 67%]
tests/test_grdimage.py::test_grdimage <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdimage.py PASSED [ 68%]
tests/test_grdimage.py::test_grdimage_slice <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdimage.py PASSED [ 68%]
tests/test_grdimage.py::test_grdimage_file <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdimage.py PASSED [ 69%]
tests/test_grdimage.py::test_grdimage_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_grdimage.py PASSED [ 70%]
tests/test_helpers.py::test_unique_name <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_helpers.py PASSED [ 70%]
tests/test_helpers.py::test_kwargs_to_strings_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_helpers.py PASSED [ 71%]
tests/test_helpers.py::test_kwargs_to_strings_no_bools <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_helpers.py PASSED [ 72%]
tests/test_helpers.py::test_gmttempfile <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_helpers.py PASSED [ 72%]
tests/test_helpers.py::test_gmttempfile_unique <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_helpers.py PASSED [ 73%]
tests/test_helpers.py::test_gmttempfile_prefix_suffix <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_helpers.py PASSED [ 74%]
tests/test_helpers.py::test_gmttempfile_read <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_helpers.py PASSED [ 74%]
tests/test_image.py::test_image <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_image.py PASSED [ 75%]
tests/test_info.py::test_info <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 75%]
tests/test_info.py::test_info_c <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 76%]
tests/test_info.py::test_info_i <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 77%]
tests/test_info.py::test_info_c_i <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 77%]
tests/test_info.py::test_info_t <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 78%]
tests/test_info.py::test_info_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 79%]
tests/test_info.py::test_grdinfo <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 79%]
tests/test_info.py::test_grdinfo_file <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 80%]
tests/test_info.py::test_grdinfo_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_info.py PASSED [ 81%]
tests/test_logo.py::test_logo <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_logo.py PASSED [ 81%]
tests/test_logo.py::test_logo_on_a_map <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_logo.py FAILED [ 82%]
tests/test_logo.py::test_logo_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_logo.py PASSED [ 82%]
tests/test_plot.py::test_plot_red_circles <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 83%]
tests/test_plot.py::test_plot_fail_no_data <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 84%]
tests/test_plot.py::test_plot_fail_size_color <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 84%]
tests/test_plot.py::test_plot_projection <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py FAILED [ 85%]
tests/test_plot.py::test_plot_colors <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 86%]
tests/test_plot.py::test_plot_sizes <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 86%]
tests/test_plot.py::test_plot_colors_sizes <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 87%]
tests/test_plot.py::test_plot_colors_sizes_proj <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py FAILED [ 87%]
tests/test_plot.py::test_plot_matrix <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py FAILED [ 88%]
tests/test_plot.py::test_plot_matrix_color <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 89%]
tests/test_plot.py::test_plot_from_file <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 89%]
tests/test_plot.py::test_plot_vectors <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_plot.py PASSED [ 90%]
tests/test_psconvert.py::test_psconvert <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_psconvert.py PASSED [ 91%]
tests/test_psconvert.py::test_psconvert_twice <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_psconvert.py PASSED [ 91%]
tests/test_psconvert.py::test_psconvert_int_options <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_psconvert.py PASSED [ 92%]
tests/test_psconvert.py::test_psconvert_aliases <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_psconvert.py PASSED [ 93%]
tests/test_session_management.py::test_begin_end <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_session_management.py PASSED [ 93%]
tests/test_sphinx_gallery.py::test_pygmtscraper <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_sphinx_gallery.py SKIPPED [ 94%]
tests/test_surface.py::test_surface_input_file <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py PASSED [ 94%]
tests/test_surface.py::test_surface_input_data_array <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py PASSED [ 95%]
tests/test_surface.py::test_surface_input_xyz <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py PASSED [ 96%]
tests/test_surface.py::test_surface_input_xy_no_z <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py PASSED [ 96%]
tests/test_surface.py::test_surface_wrong_kind_of_input <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py PASSED [ 97%]
tests/test_surface.py::test_surface_with_outfile_param <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py FAILED [ 98%]
tests/test_surface.py::test_surface_short_aliases <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py FAILED [ 98%]
tests/test_which.py::test_which <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_which.py PASSED [ 99%]
tests/test_which.py::test_which_fails <- ..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_which.py PASSED [100%]

================================== FAILURES ===================================
__________________ [doctest] pygmt.clib.conversion._as_array __________________
214 
215     Examples
216     --------
217 
218     >>> import pandas as pd
219     >>> x_series = pd.Series(data=[1, 2, 3, 4])
220     >>> x_array = _as_array(x_series)
221     >>> type(x_array)
222     <class 'numpy.ndarray'>
223     >>> x_array
Expected:
    array([1, 2, 3, 4])
Got:
    array([1, 2, 3, 4], dtype=int64)

C:\Users\seisman\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\conversion.py:223: DocTestFailure
___________ [doctest] pygmt.clib.conversion.kwargs_to_ctypes_array ____________
256     Returns
257     -------
258     ctypes_value : ctypes array or None
259 
260     Examples
261     --------
262 
263     >>> import ctypes as ct
264     >>> value = kwargs_to_ctypes_array('bla', {'bla': [10, 10]}, ct.c_int*2)
265     >>> type(value)
Expected:
    <class 'pygmt.clib.conversion.c_int_Array_2'>
Got:
    <class 'pygmt.clib.conversion.c_long_Array_2'>

C:\Users\seisman\Anaconda3\envs\pygmt\lib\site-packages\pygmt\clib\conversion.py:265: DocTestFailure
_____________________________ test_basemap_polar ______________________________

args = (), kwargs = {}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AF7872E8>
filename = 'test_basemap_polar.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpn2iws22a'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpn2iws22a\\test_basemap_polar.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_basemap_polar.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpn2iws22a\\baseline-test_basemap_polar.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1822, 1961, 3) actual size (1822, 1958, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
_________________________ test_basemap_winkel_tripel __________________________

args = (), kwargs = {}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AF7994E0>
filename = 'test_basemap_winkel_tripel.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpxg43abx0'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpxg43abx0\\test_basemap_winkel_tripel.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_basemap_winkel_tripel.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpxg43abx0\\baseline-test_basemap_winkel_tripel.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1961, 3120, 3) actual size (1958, 3128, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
____________________________ test_basemap_aliases _____________________________

args = (), kwargs = {}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AFB00390>
filename = 'test_basemap_aliases.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpuddkczeh'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpuddkczeh\\test_basemap_aliases.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_basemap_aliases.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpuddkczeh\\baseline-test_basemap_aliases.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1057, 2267, 3) actual size (1057, 2275, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
_________________________________ test_coast __________________________________

args = (), kwargs = {}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AFB1FD30>
filename = 'test_coast.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpn14i9jpn'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpn14i9jpn\\test_coast.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_coast.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpn14i9jpn\\baseline-test_coast.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (2768, 2081, 3) actual size (2765, 2089, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
_____________________________ test_coast_iceland ______________________________

args = (), kwargs = {}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AFB23048>
filename = 'test_coast_iceland.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpe6azbszr'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpe6azbszr\\test_coast_iceland.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_coast_iceland.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpe6azbszr\\baseline-test_coast_iceland.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1441, 2585, 3) actual size (1438, 2593, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
_____________________________ test_coast_aliases ______________________________

args = (), kwargs = {}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AF7994A8>
filename = 'test_coast_aliases.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpigmb383z'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpigmb383z\\test_coast_aliases.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_coast_aliases.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpigmb383z\\baseline-test_coast_aliases.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (2768, 2081, 3) actual size (2765, 2089, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
__________________________ test_coast_world_mercator __________________________

args = (), kwargs = {}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AFB874E0>
filename = 'test_coast_world_mercator.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmp__2mlqs5'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmp__2mlqs5\\test_coast_world_mercator.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_coast_world_mercator.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmp__2mlqs5\\baseline-test_coast_world_mercator.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (2480, 3281, 3) actual size (2477, 3289, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
_______________________________ test_grdcontour _______________________________
Error: Image files did not match.
  RMS Value: 3.9133396446321216
  Expected:  
    C:\Users\seisman\AppData\Local\Temp\tmpkj_i8_1z\baseline-test_grdcontour.png
  Actual:    
    C:\Users\seisman\AppData\Local\Temp\tmpkj_i8_1z\test_grdcontour.png
  Difference:
    C:\Users\seisman\AppData\Local\Temp\tmpkj_i8_1z\test_grdcontour-failed-diff.png
  Tolerance: 
    2
___________________________ test_grdcontour_labels ____________________________
Error: Image files did not match.
  RMS Value: 25.282257700227564
  Expected:  
    C:\Users\seisman\AppData\Local\Temp\tmprx6u4lay\baseline-test_grdcontour_labels.png
  Actual:    
    C:\Users\seisman\AppData\Local\Temp\tmprx6u4lay\test_grdcontour_labels.png
  Difference:
    C:\Users\seisman\AppData\Local\Temp\tmprx6u4lay\test_grdcontour_labels-failed-diff.png
  Tolerance: 
    2
____________________________ test_grdcontour_slice ____________________________
Error: Image files did not match.
  RMS Value: 2.1314405328071517
  Expected:  
    C:\Users\seisman\AppData\Local\Temp\tmpzpf4pbml\baseline-test_grdcontour_slice.png
  Actual:    
    C:\Users\seisman\AppData\Local\Temp\tmpzpf4pbml\test_grdcontour_slice.png
  Difference:
    C:\Users\seisman\AppData\Local\Temp\tmpzpf4pbml\test_grdcontour_slice-failed-diff.png
  Tolerance: 
    2
___________________ test_grdcontour_interval_file_full_opts ___________________
Error: Image files did not match.
  RMS Value: 12.615375102338323
  Expected:  
    C:\Users\seisman\AppData\Local\Temp\tmpspp96rt5\baseline-test_grdcontour_interval_file_full_opts.png
  Actual:    
    C:\Users\seisman\AppData\Local\Temp\tmpspp96rt5\test_grdcontour_interval_file_full_opts.png
  Difference:
    C:\Users\seisman\AppData\Local\Temp\tmpspp96rt5\test_grdcontour_interval_file_full_opts-failed-diff.png
  Tolerance: 
    2
_____________________________ test_logo_on_a_map ______________________________

args = (), kwargs = {}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AFBB3F28>
filename = 'test_logo_on_a_map.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpl4fay6z0'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpl4fay6z0\\test_logo_on_a_map.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_logo_on_a_map.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpl4fay6z0\\baseline-test_logo_on_a_map.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1986, 2023, 3) actual size (1983, 2031, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
____________________________ test_plot_projection _____________________________

args = ()
kwargs = {'data': array([[43.4847,  0.6227,  0.5309],
       [22.331 ,  3.7556,  0.3817],
       [40.8023,  5.5903,  0.7764],
 ... 0.7622],
       [61.7074,  1.4425,  0.4305],
       [28.1125,  3.8456,  0.9338],
       [47.8333, -0.7225,  0.5969]])}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180ADA7D518>
filename = 'test_plot_projection.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpd85vjqrf'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpd85vjqrf\\test_plot_projection.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_plot_projection.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpd85vjqrf\\baseline-test_plot_projection.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (894, 1367, 3) actual size (891, 1375, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
_________________________ test_plot_colors_sizes_proj _________________________

args = ()
kwargs = {'data': array([[43.4847,  0.6227,  0.5309],
       [22.331 ,  3.7556,  0.3817],
       [40.8023,  5.5903,  0.7764],
 ...1.4425,  0.4305],
       [28.1125,  3.8456,  0.9338],
       [47.8333, -0.7225,  0.5969]]), 'region': [10, 70, -5, 10]}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AFB95E48>
filename = 'test_plot_colors_sizes_proj.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpaac4kfjv'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpaac4kfjv\\test_plot_colors_sizes_proj.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_plot_colors_sizes_proj.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmpaac4kfjv\\baseline-test_plot_colors_sizes_proj.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (908, 3225, 3) actual size (905, 3234, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
______________________________ test_plot_matrix _______________________________

args = ()
kwargs = {'data': array([[43.4847,  0.6227,  0.5309],
       [22.331 ,  3.7556,  0.3817],
       [40.8023,  5.5903,  0.7764],
 ... 0.7622],
       [61.7074,  1.4425,  0.4305],
       [28.1125,  3.8456,  0.9338],
       [47.8333, -0.7225,  0.5969]])}
baseline_dir = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline'
baseline_remote = False
fig = <pygmt.figure.Figure object at 0x00000180AF86F198>
filename = 'test_plot_matrix.png'
result_dir = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmp32l0smu9'
test_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmp32l0smu9\\test_plot_matrix.png'
baseline_image_ref = 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\baseline\\test_plot_matrix.png'
baseline_image = 'C:\\Users\\seisman\\AppData\\Local\\Temp\\tmp32l0smu9\\baseline-test_plot_matrix.png'

    @wraps(item.function)
    def item_function_wrapper(*args, **kwargs):
    
        baseline_dir = compare.kwargs.get('baseline_dir', None)
        if baseline_dir is None:
            if self.baseline_dir is None:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
            else:
                baseline_dir = self.baseline_dir
            baseline_remote = False
        else:
            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
    
        with plt.style.context(style, after_reset=True), switch_backend(backend):
    
            # Run test and get figure object
            if inspect.ismethod(original):  # method
                # In some cases, for example if setup_method is used,
                # original appears to belong to an instance of the test
                # class that is not the same as args[0], and args[0] is the
                # one that has the correct attributes set up from setup_method
                # so we ignore original.__self__ and use args[0] instead.
                fig = original.__func__(*args, **kwargs)
            else:  # function
                fig = original(*args, **kwargs)
    
            if remove_text:
                remove_ticks_and_titles(fig)
    
            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = item.name + '.png'
                filename = filename.replace('[', '_').replace(']', '_')
                filename = filename.replace('/', '_')
                filename = filename.replace('_.png', '.png')
    
            # What we do now depends on whether we are generating the
            # reference images or simply running the test.
            if self.generate_dir is None:
    
                # Save the figure
                result_dir = tempfile.mkdtemp(dir=self.results_dir)
                test_image = os.path.abspath(os.path.join(result_dir, filename))
    
                fig.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig)
    
                # Find path to baseline image
                if baseline_remote:
                    baseline_image_ref = _download_file(baseline_dir, filename)
                else:
                    baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
    
                if not os.path.exists(baseline_image_ref):
                    pytest.fail("Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
    
                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)
    
>               msg = compare_images(baseline_image, test_image, tol=tolerance)

..\Anaconda3\envs\pygmt\lib\site-packages\pytest_mpl\plugin.py:275: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:426: in compare_images
    rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

expectedImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
     ...[255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=int16)

    def calculate_rms(expectedImage, actualImage):
        "Calculate the per-pixel errors, then compute the root mean square error."
        if expectedImage.shape != actualImage.shape:
            raise ImageComparisonFailure(
                "Image sizes do not match expected size: {} "
>               "actual size {}".format(expectedImage.shape, actualImage.shape))
E           matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (908, 3225, 3) actual size (905, 3234, 3)

..\Anaconda3\envs\pygmt\lib\site-packages\matplotlib\testing\compare.py:356: ImageComparisonFailure
_______________________ test_surface_with_outfile_param _______________________

    def test_surface_with_outfile_param():
        """
        Run surface with the -Goutputfile.nc parameter
        """
        ship_data = load_sample_bathymetry()
        data = ship_data.values  # convert pandas.DataFrame to numpy.ndarray
        try:
            output = surface(
                data=data, spacing="5m", region="245/255/20/30", outfile=TEMP_GRID
            )
            assert output is None  # check that output is None since outfile is set
            assert os.path.exists(path=TEMP_GRID)  # check that outfile exists at path
            grid = xr.open_dataset(TEMP_GRID)
            assert isinstance(grid, xr.Dataset)  # check that netcdf grid loaded properly
        finally:
>           os.remove(path=TEMP_GRID)
E           PermissionError: [WinError 32] 另一个程序正在使用此文件，进程无法访问。: 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\data\\tmp_grid.nc'

..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py:96: PermissionError
_________________________ test_surface_short_aliases __________________________

    def test_surface_short_aliases():
        """
        Run surface using short aliases -I for spacing, -R for region, -G for outfile
        """
        ship_data = load_sample_bathymetry()
        data = ship_data.values  # convert pandas.DataFrame to numpy.ndarray
        try:
            output = surface(data=data, I="5m", R="245/255/20/30", G=TEMP_GRID)
            assert output is None  # check that output is None since outfile is set
            assert os.path.exists(path=TEMP_GRID)  # check that outfile exists at path
            grid = xr.open_dataset(TEMP_GRID)
            assert isinstance(grid, xr.Dataset)  # check that netcdf grid loaded properly
        finally:
>           os.remove(path=TEMP_GRID)
E           PermissionError: [WinError 32] 另一个程序正在使用此文件，进程无法访问。: 'C:\\Users\\seisman\\Anaconda3\\envs\\pygmt\\lib\\site-packages\\pygmt\\tests\\data\\tmp_grid.nc'

..\Anaconda3\envs\pygmt\lib\site-packages\pygmt\tests\test_surface.py:113: PermissionError
============================== warnings summary ===============================
tests/test_datasets.py::test_japan_quakes
  C:\Users\seisman\Anaconda3\envs\pygmt\lib\site-packages\pygmt\datasets\tutorial.py:28: FutureWarning: read_table is deprecated, use read_csv instead.
    data = pd.read_table(fname, header=1, sep=r"\s+")

-- Docs: https://docs.pytest.org/en/latest/warnings.html
======== 19 failed, 138 passed, 1 skipped, 1 warnings in 68.48 seconds ========
